
When capturing Ethernet packets with PACKET_MMAP (PACKET_RX_RING), I am losing more than 50% of the packets at data rates of 100 KB/s or higher. Is that typical for this technique?

When using PACKET_MMAP with PACKET_RX_RING, is there any opportunity or room to improve the code/parameters/logic below to reduce the packet loss?

#include <stdio.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <sys/mman.h>
#include <poll.h>
#include <signal.h>



FILE* pcapfile;   /* capture output file, opened in main() and used by handle_frame() */

void handle_frame(struct tpacket_hdr* tphdr, struct sockaddr_ll* addr, char* l2content, char* l3content){

  if(tphdr->tp_status & TP_STATUS_USER){
    /* frame is owned by user space: write the captured bytes,
       then hand the ring slot back to the kernel */
    fwrite(l2content, tphdr->tp_snaplen, 1, pcapfile);
    tphdr->tp_status = TP_STATUS_KERNEL;
  }
}

int main(){
  pcapfile = fopen("file1.cap","a+");



int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (fd == -1) {
  perror("socket");
  exit(1);
 }

struct tpacket_req req = {0};
req.tp_frame_size = TPACKET_ALIGN(TPACKET_HDRLEN + ETH_HLEN) + TPACKET_ALIGN(1500);
req.tp_block_size = sysconf(_SC_PAGESIZE);
while (req.tp_block_size < req.tp_frame_size) {
  req.tp_block_size <<= 1;
 }
 req.tp_block_nr = 4;
size_t frames_per_buffer = req.tp_block_size / req.tp_frame_size;
req.tp_frame_nr = req.tp_block_nr * frames_per_buffer;

int version = TPACKET_V1;
if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) == -1) {
  perror("setsockopt PACKET_VERSION");
  exit(1);
}

if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, (void*)&req, sizeof(req)) == -1) {
  perror("setsockopt PACKET_RX_RING");
  exit(1);
}

size_t rx_ring_size = req.tp_block_nr * req.tp_block_size;
char* rx_ring = mmap(0, rx_ring_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (rx_ring == MAP_FAILED) {
  perror("mmap");
  exit(1);
}

struct pollfd fds[1] = {0};
fds[0].fd = fd;
fds[0].events = POLLIN;
size_t frame_idx = 0;
char* frame_ptr = rx_ring;

while (1) {
  struct tpacket_hdr* tphdr = (struct tpacket_hdr*)frame_ptr;
  while (!(tphdr->tp_status & TP_STATUS_USER)) {
    if (poll(fds, 1, -1) == -1) {
      perror("poll");
      exit(1);
    }
  }

  struct sockaddr_ll* addr = (struct sockaddr_ll*)(frame_ptr + TPACKET_HDRLEN - sizeof(struct sockaddr_ll));
  char* l2content = frame_ptr + tphdr->tp_mac;
  char* l3content = frame_ptr + tphdr->tp_net;
  handle_frame(tphdr, addr, l2content, l3content);

  frame_idx = (frame_idx + 1) % req.tp_frame_nr;
  int buffer_idx = frame_idx / frames_per_buffer;
  char* buffer_ptr = rx_ring + buffer_idx * req.tp_block_size;
  int frame_idx_diff = frame_idx % frames_per_buffer;
  frame_ptr = buffer_ptr + frame_idx_diff * req.tp_frame_size;
 }

fflush(pcapfile);
fclose(pcapfile);
}

1 Answer


Hi pana,

Check your RX ring settings: tp_block_size = page size, which holds only a couple of Ethernet frames at the standard MTU (≈1512 bytes; in my case one frame takes a 2 KB page), and tp_block_nr = 4, i.e. only 4 blocks — note that the blocks are not physically contiguous with each other! I think your ring buffer is simply overflowing. I really suggest increasing tp_block_size (I use the following; my page size is 2 KB):

tp.tp_block_size = BLOCK_SIZE; //(PAGE_2K * PAGE_2K)
tp.tp_block_nr = BLOCK_NR;     //BLOCK_NR            (1)
tp.tp_frame_size = PAGE_2K; /* Max size eth frame is 1522 bytes */
tp.tp_frame_nr = (tp.tp_block_size * tp.tp_block_nr) / tp.tp_frame_size;

and reduce the number of blocks.
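As an illustration only (these exact numbers are assumptions, not taken from the answer), applying that advice to the question's tpacket_req on a system with 4 KiB pages could look roughly like this:

/* Illustrative sizing: 2 KiB frame slots in 4 MiB blocks, 2 blocks.
 * This gives 2 * (4 MiB / 2 KiB) = 4096 ring slots instead of the
 * handful of slots the original page-sized blocks could hold. */
struct tpacket_req req = {0};
req.tp_frame_size = 2048;                 /* multiple of TPACKET_ALIGNMENT, fits a full frame plus header */
req.tp_block_size = 4 * 1024 * 1024;      /* must be a multiple of the page size */
req.tp_block_nr   = 2;                    /* fewer, larger blocks */
req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;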

Also, try to reduce the number of system calls inside the capture loop. I suggest doing the file writes in a separate thread, because fwrite is a fairly heavy call (measure its cost, just in case). In addition, I suggest enabling promiscuous mode on the Ethernet interface by adding this to the initialization code:

struct packet_mreq mreq = {0};
mreq.mr_ifindex = if_idx.ifr_ifindex;   /* index of the capture interface */
mreq.mr_type = PACKET_MR_PROMISC;
if (setsockopt(sockfd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) == -1) 
{
    perror("setsockopt");
    goto closefd;
}
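
The mreq.mr_ifindex line above relies on an if_idx variable that the snippet does not show; a minimal sketch of obtaining it with a SIOCGIFINDEX ioctl (the interface name "eth0" is only an example) would be:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

struct ifreq if_idx;
memset(&if_idx, 0, sizeof(if_idx));
strncpy(if_idx.ifr_name, "eth0", IFNAMSIZ - 1);    /* example interface name */
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) == -1) {  /* fills if_idx.ifr_ifindex */
    perror("SIOCGIFINDEX");
    goto closefd;
}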

If you do split capture and file writing into separate threads, give the capture thread the SCHED_FIFO scheduling policy:

ret = pthread_attr_setschedpolicy(&tattr, SCHED_FIFO);
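
A minimal sketch of that thread setup, assuming a capture_thread_fn that runs the poll/handle loop (the function name and the priority value are placeholders, and SCHED_FIFO normally requires root or CAP_SYS_NICE):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

pthread_t cap_tid;
pthread_attr_t tattr;
struct sched_param sparam = { .sched_priority = 50 };   /* example priority */
int ret;

pthread_attr_init(&tattr);
/* Without EXPLICIT_SCHED the policy set below is ignored and the
 * thread inherits the creator's scheduling attributes. */
pthread_attr_setinheritsched(&tattr, PTHREAD_EXPLICIT_SCHED);
ret = pthread_attr_setschedpolicy(&tattr, SCHED_FIFO);
pthread_attr_setschedparam(&tattr, &sparam);

/* The capture thread services the RX ring; a second, default-priority
 * thread does the fwrite() calls to the capture file. */
ret = pthread_create(&cap_tid, &tattr, capture_thread_fn, NULL);
if (ret != 0)
    fprintf(stderr, "pthread_create: %s\n", strerror(ret));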

Regards, Bulat

Answered 2019-09-22T13:42:42.197