1 编译
在 https://blog.csdn.net/qq43645149/article/details/130545239 的基础上
先设置一下环境变量
export RTE_SDK=/home/king/share/dpdk/dpdk-stable-19.08.2/
export RTE_TARGET=x86_64-native-linux-gcc
root@ubuntu:/home/king/share/cs/ustack-main# make
CC ustack.o
LD ustack
INSTALL-APP ustack
INSTALL-MAP ustack.map
2 运行程序看到的提示参考
root@ubuntu:/home/king/share/cs/ustack-main# ./build/ustack
EAL: Detected 4 lcore(s)
EAL: Detected 1 NUMA nodes
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'PA'
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: PCI device 0000:02:01.0 on NUMA socket -1
EAL: Invalid NUMA socket, default to 0
EAL: probe driver: 8086:100f net_e1000_em
EAL: PCI device 0000:02:06.0 on NUMA socket -1
EAL: Invalid NUMA socket, default to 0
EAL: probe driver: 8086:100f net_e1000_em
EAL: PCI device 0000:03:00.0 on NUMA socket -1
EAL: Invalid NUMA socket, default to 0
EAL: probe driver: 15ad:7b0 net_vmxnet3
EAL: PCI device 0000:0b:00.0 on NUMA socket -1
EAL: Invalid NUMA socket, default to 0
EAL: probe driver: 15ad:7b0 net_vmxnet3
3 代码部分
#include <stdio.h>
#include <rte_eal.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <arpa/inet.h>
#define MBUF_COUNT 4096
#define BURST_SIZE 32
// eth0 , 0
// eth1 , 1
int gDpdkPortId = 0; // logical value: the DPDK port id of the NIC bound to the driver
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = RTE_ETHER_MAX_LEN } // RX-only configuration; TX is not set up in this demo
};
int main(int argc, char *argv[]) {
if (rte_eal_init(argc, argv) < 0) {
rte_exit(EXIT_FAILURE, "Failed to init EAL\n");
} //
struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("mbufpool", MBUF_COUNT, 0, 0,
RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
struct rte_eth_conf port_conf = port_conf_default;
int num_rx_queues = 1; //这里只处里收
int num_tx_queues = 0; // 发送暂不处理
rte_eth_dev_configure(gDpdkPortId, num_rx_queues, num_tx_queues, &port_conf);
rte_eth_rx_queue_setup(gDpdkPortId, 0, 128, rte_eth_dev_socket_id(gDpdkPortId),
NULL, mbuf_pool);
rte_eth_dev_start(gDpdkPortId);
// tcp
while (1) {
struct rte_mbuf *mbufs[BURST_SIZE];
// 数据来源与 rte_eth_rx_queue_setup 中的 mbuf_pool环行buf中最多取BURST_SIZE个
unsigned num_recvd = rte_eth_rx_burst(gDpdkPortId, 0, mbufs, BURST_SIZE); // mbufs从池子中拿,无拷贝,不用释放
if (num_recvd > BURST_SIZE) {
rte_exit(EXIT_FAILURE, "Failed rte_eth_rx_burst\n");
}
unsigned i = 0;
// 解析收到的这些数据包
for (i = 0;i < num_recvd;i ++) {
// 解 以太网头
struct rte_ether_hdr *ehdr = rte_pktmbuf_mtod(mbufs[i], struct rte_ether_hdr *);
if (ehdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) {
// 解 IP头
struct rte_ipv4_hdr *iphdr = rte_pktmbuf_mtod_offset(mbufs[i],struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
if (iphdr->next_proto_id == IPPROTO_UDP) {
// tcp/ip, udp/ip
// 解 UDP头
struct rte_udp_hdr *udphdr = (struct rte_udp_hdr *)(iphdr + 1);
uint16_t length = ntohs(udphdr->dgram_len);
*((char*)udphdr + length) = '\0';
printf("data: %s\n", (char*)(udphdr + 1));
}
}
}
}
printf("hello ustack\n");
}
3.1 部分函数说明
struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("mbufpool", MBUF_COUNT, 0, 0,
RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
原型:struct rte_mempool *rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id);
参数说明:
name:内存池的名称。
n:内存池中的元素数量。
cache_size:每个 CPU 缓存的大小,单位为元素数目,如果为 0 则表示禁用缓存。
priv_size:每个元素的私有数据空间大小。可以使用 0 表示没有私有数据。
data_room_size:每个元素中存储数据的空间大小。
socket_id:内存池所在的 NUMA 节点编号。
4 测试代码
4.1 设置虚拟机网卡
几个网络适配器都设置为NAT
4.2 在windows侧做虚拟机被绑定的网卡的ip地址到mac地址的映射
前置条件:vmware eth0是被绑定到了dpdk
eth0 Link encap:Ethernet HWaddr 00:0c:29:a3:11:bf
inet addr:192.168.241.133 Bcast:192.168.241.255 Mask:255.255.255.0
inet6 addr: fe80::20c:29ff:fea3:11bf/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:438 errors:0 dropped:0 overruns:0 frame:0
TX packets:25 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:54959 (54.9 KB) TX bytes:5958 (5.9 KB)
windows侧处理部分
windows上面
cmd中输入 arp -a 看结果里面有没有 ==192.168.241.133==
接口: 192.168.241.1 --- 0x9
Internet 地址 物理地址 类型
192.168.241.128 00-0c-29-a3-11-c9 动态
192.168.241.254 00-50-56-f5-29-de 动态
192.168.241.255 ff-ff-ff-ff-ff-ff 静态
224.0.0.22 01-00-5e-00-00-16 静态
224.0.0.251 01-00-5e-00-00-fb 静态
224.0.0.252 01-00-5e-00-00-fc 静态
239.255.255.250 01-00-5e-7f-ff-fa 静态
255.255.255.255 ff-ff-ff-ff-ff-ff 静态
发现,找不到==192.168.241.133==,但是我的dpdk小程序是跑在==192.168.241.133==上面的,怎么办?
这时候需要cmd中输入
netsh i i show in
Idx Met MTU 状态 名称
17 40 1500 connected WLAN
1 75 4294967295 connected Loopback Pseudo-Interface 1
19 25 1500 disconnected 本地连接 1
11 25 1500 disconnected 本地连接 2
22 25 1500 connected 以太网 2
5 35 1500 connected VMware Network Adapter VMnet1
==9== 35 1500 connected VMware Network Adapter VMnet8
找到对应的网卡的idx(根据虚拟机的配置),做一个ip地址到mac地址的映射(arp):(为了让pc端的网络助手与虚拟机上跑的dpdk程序通信)
netsh -c i i add neighbors 9 192.168.241.133 00:0c:29:a3:11:bf (后面两项来自 vmware eth0)
上面的格式不对,改为:
netsh -c i i add neighbors 9 192.168.241.133 00-0c-29-a3-11-bf
再次输入arp -a
接口: 192.168.241.1 --- ==0x9== (对应netsh -c i i add neighbors ==9== 192.168.241.133 00-0c-29-a3-11-bf 中的9)
Internet 地址 物理地址 类型
192.168.241.128 00-0c-29-a3-11-c9 动态
==192.168.241.133 00-0c-29-a3-11-bf 静态== ⇒ 这里就是做了arp映射的ip与mac了
192.168.241.254 00-50-56-f5-29-de 动态
192.168.241.255 ff-ff-ff-ff-ff-ff 静态
224.0.0.22 01-00-5e-00-00-16 静态
224.0.0.251 01-00-5e-00-00-fb 静态
224.0.0.252 01-00-5e-00-00-fc 静态
239.255.255.250 01-00-5e-7f-ff-fa 静态
255.255.255.255 ff-ff-ff-ff-ff-ff 静态
用网络调试助手测试 dpdk udp小程序
本机地址对应上面的192.168.241.1,远程主机是192.168.241.133,端口不限定
文章参考自<零声教育>的C/C++ Linux服务器高级架构系统教程学习:链接