#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <endian.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/types.h>
#include <linux/vfio.h>
/* Us */
/* Local headers (names assumed). They must provide: struct RxDesc, the
 * DescOwn/RingEnd/RxRES/RxRUNT/RxCRC/RxRWT/RxFOVF/RxBOVF bits, NUM_RX_DESC,
 * dma_rmb()/dma_wmb(), le32_to_cpu()/cpu_to_le32()/cpu_to_le64(), unlikely(),
 * struct iomem, iomem_init()/iomem_alloc(), and the VFIO helpers
 * get_container()/get_group()/vfio_init_dev().
 */
#include "rtl8169.h"
#include "vfio-helpers.h"
#include "iomem.h"

typedef unsigned long dma_addr_t;

char *rxBuffers[256];

static void print_packet(unsigned char *buffer)
{
	int i;

	/* Ethernet header: bytes 0-5 destination MAC, 6-11 source MAC,
	 * 12-13 EtherType (big endian on the wire). Printed as src -> dst. */
	printf("%02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x [%04x]:",
	       buffer[6], buffer[7], buffer[8], buffer[9], buffer[10], buffer[11],
	       buffer[0], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5],
	       be16toh(*((__u16 *)(&buffer[12]))));

	/* Dump the first bytes of the payload */
	for (i = 14; i < 32; i++)
		printf("%02x", buffer[i]);
}

static inline void rtl8169_mark_to_asic(struct RxDesc *desc, __u32 rx_buf_sz)
{
	__u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}

static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       __u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}

static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}

int rtl8169_rx_fill(int device, struct RxDesc *rxRing, struct iomem *data)
{
	int i;

	/* Hand each descriptor a 2 KiB buffer inside the DMA-mapped area */
	for (i = 0; i < NUM_RX_DESC; i++) {
		rtl8169_map_to_asic(&rxRing[i], data->iova + i * 2048, 2048);
		rxBuffers[i] = (char *)(data->vaddr + i * 2048);
	}

	rtl8169_mark_as_last_descriptor(&rxRing[NUM_RX_DESC - 1]);

	return 0;
}

void usage(char *name)
{
	printf("usage: %s <group-id> <device-uuid>\n", name);
}

int main(int argc, char *argv[])
{
	int container = -1, group = -1, device, i = 0;
	int group_id;
	char group_uuid[64]; /* 37 should be enough */
	struct vfio_group_status group_status = { .argsz = sizeof(group_status) };
	struct vfio_iommu_type1_info iommu_info = { .argsz = sizeof(iommu_info) };
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	struct vfio_region_info region_info = { .argsz = sizeof(region_info) };
	struct RxDesc *rxRing = NULL;
	void *iobase, *iocur;
	struct iomem rx_data;
	int ret;

	if (argc != 3) {
		usage(argv[0]);
		return -1;
	}

	iobase = iomem_init();
	if (!iobase)
		return -ENOMEM;

	iocur = iobase;

	/* Create a new container */
	container = get_container();
	if (container < 0)
		goto out;

	group_id = atoi(argv[1]);
	group = get_group(group_id);
	if (group < 0)
		goto out;

	strncpy(group_uuid, argv[2], sizeof(group_uuid) - 1);
	group_uuid[sizeof(group_uuid) - 1] = '\0';

	device = vfio_init_dev(group, container, &group_status, &iommu_info,
			       &device_info, &region_info, group_uuid);
	if (device < 0)
		goto out;

	printf("Region:%d size %llu, offset 0x%llx, flags 0x%x\n",
	       i, region_info.size, region_info.offset, region_info.flags);

	rxRing = mmap(NULL, region_info.size, PROT_READ | PROT_WRITE,
		      MAP_SHARED, device, region_info.offset);
	if (rxRing == MAP_FAILED) {
		printf("Could not mmap the rx ring region\n");
		rxRing = NULL;
		goto out;
	}

	ret = iomem_alloc(device, 2 * 1024 * 1024, &iocur, &rx_data);
	if (ret)
		goto out;

	if (rtl8169_rx_fill(device, rxRing, &rx_data) != 0) {
		printf("Could not fill ring\n");
		goto out;
	}

	/* signal ready */
	ioctl(device, 500, NULL);

	i = 0;
	while (1) {
		if (i >= NUM_RX_DESC)
			i = 0;

		for (; i < NUM_RX_DESC; i++) {
			__u32 status;

			status = le32_to_cpu(rxRing[i].opts1) & ~0; /* either ~(RxBOVF | RxFOVF) or ~0 */
			if (status & DescOwn) {
				/* Descriptor still owned by the NIC; poll again later */
				usleep(100 * 1000);
				break;
			}

			/* This barrier is needed to keep us from reading
			 * any other fields out of the Rx descriptor until
			 * we know the status of DescOwn
			 */
			dma_rmb();

			if (unlikely(status & RxRES)) {
				printf("Rx ERROR. status = %08x\n", status);
				/*
				dev->stats.rx_errors++;
				if (status & (RxRWT | RxRUNT))
					dev->stats.rx_length_errors++;
				if (status & RxCRC)
					dev->stats.rx_crc_errors++;
				if (status & RxFOVF) {
					rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
					dev->stats.rx_fifo_errors++;
				}
				*/
				if ((status & (RxRUNT | RxCRC)) &&
				    !(status & (RxRWT | RxFOVF))
				    /* && (dev->features & NETIF_F_RXALL) */)
					goto process_pkt;
			} else {
				//dma_addr_t addr;
				int pkt_size;

process_pkt:
				//addr = le64_to_cpu(rxRing[i].addr);
				if (1) // likely(!(dev->features & NETIF_F_RXFCS))
					pkt_size = (status & 0x00003fff) - 4;
				else
					pkt_size = status & 0x00003fff;

				/*
				 * The driver does not support incoming fragmented
				 * frames. They are seen as a symptom of over-mtu
				 * sized frames.
				 */
				/*
				if (unlikely(rtl8169_fragmented_frame(status))) {
					dev->stats.rx_dropped++;
					dev->stats.rx_length_errors++;
					goto release_descriptor;
				}

				skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
							  tp, pkt_size, addr);
				if (!skb) {
					dev->stats.rx_dropped++;
					goto release_descriptor;
				}

				rtl8169_rx_csum(skb, status);
				skb_put(skb, pkt_size);
				skb->protocol = eth_type_trans(skb, dev);

				rtl8169_rx_vlan_tag(desc, skb);

				if (skb->pkt_type == PACKET_MULTICAST)
					dev->stats.multicast++;

				napi_gro_receive(&tp->napi, skb);

				u64_stats_update_begin(&tp->rx_stats.syncp);
				tp->rx_stats.packets++;
				tp->rx_stats.bytes += pkt_size;
				u64_stats_update_end(&tp->rx_stats.syncp);
				*/
				printf("desc[%03d]: size= %5d ", i, pkt_size);
				print_packet((unsigned char *)rxBuffers[i]);
				printf("\n");
			}
/* release_descriptor: */
			rxRing[i].opts2 = 0;
			rtl8169_mark_to_asic(&rxRing[i], 2048);
		}
	}

out:
	if (rxRing)
		munmap(rxRing, region_info.size);
	if (group >= 0)
		close(group);
	if (container >= 0)
		close(container);

	return -1;
}
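
/*
 * For reference, a minimal sketch of what the assumed "rtl8169.h" would need
 * to supply for the descriptor accesses above. The three-field layout matches
 * the Linux r8169 driver; treating it as the contents of the missing local
 * header is an assumption, not part of the original program:
 *
 *	struct RxDesc {
 *		__le32 opts1;	// DescOwn | RingEnd | status bits | frame size
 *		__le32 opts2;	// VLAN tag information
 *		__le64 addr;	// IOVA of the receive buffer
 *	};
 *
 * Example invocation, assuming the NIC is already bound to vfio-pci and its
 * IOMMU group is visible under /dev/vfio. The binary name, group number, and
 * UUID below are illustrative; the UUID is whatever device name
 * vfio_init_dev() resolves via VFIO_GROUP_GET_DEVICE_FD:
 *
 *	./vfio-rx 42 a297db4a-f4c2-11e6-90f6-d3b88d6c9525
 */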