Message ID | 20220224212314.1326-19-gdawar@xilinx.com (mailing list archive)
---|---
State | New, archived
Series | [RFC,v2,01/19] vhost: move the backend feature bits to vhost_types.h
On Thu, Feb 24, 2022 at 10:29 PM Gautam Dawar <gautam.dawar@xilinx.com> wrote:
>
> This patch implements a simple unicast filter for the vDPA simulator.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> Signed-off-by: Gautam Dawar <gdawar@xilinx.com>
> ---
>  drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 49 ++++++++++++++++++----------
>  1 file changed, 31 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> index 05d552cb7f94..ed5ade4ae570 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> @@ -47,13 +47,28 @@ static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
>  	local_bh_enable();
>  }
>
> +static bool receive_filter(struct vdpasim *vdpasim, size_t len)
> +{
> +	bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
> +	size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
> +				  sizeof(struct virtio_net_hdr);
> +
> +	if (len < ETH_ALEN + hdr_len)
> +		return false;
> +
> +	if (!strncmp(vdpasim->buffer + hdr_len,
> +		     vdpasim->config.mac, ETH_ALEN))
> +		return true;
> +
> +	return false;
> +}
> +
>  static void vdpasim_net_work(struct work_struct *work)
>  {
>  	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
>  	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
>  	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
>  	ssize_t read, write;
> -	size_t total_write;
>  	int pkts = 0;
>  	int err;
>
> @@ -66,36 +81,34 @@ static void vdpasim_net_work(struct work_struct *work)
>  		goto out;
>
>  	while (true) {
> -		total_write = 0;
>  		err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
>  					   &txq->head, GFP_ATOMIC);
>  		if (err <= 0)
>  			break;
>
> +		read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
> +					     vdpasim->buffer,
> +					     PAGE_SIZE);
> +
> +		if (!receive_filter(vdpasim, read)) {
> +			vdpasim_complete(txq, 0);

vdpasim_net_complete?

Thanks!

> +			continue;
> +		}
> +
>  		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
>  					   &rxq->head, GFP_ATOMIC);
>  		if (err <= 0) {
> -			vringh_complete_iotlb(&txq->vring, txq->head, 0);
> +			vdpasim_net_complete(txq, 0);
>  			break;
>  		}
>
> -		while (true) {
> -			read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
> -						     vdpasim->buffer,
> -						     PAGE_SIZE);
> -			if (read <= 0)
> -				break;
> -
> -			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
> -						      vdpasim->buffer, read);
> -			if (write <= 0)
> -				break;
> -
> -			total_write += write;
> -		}
> +		write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
> +					      vdpasim->buffer, read);
> +		if (write <= 0)
> +			break;
>
>  		vdpasim_net_complete(txq, 0);
> -		vdpasim_net_complete(rxq, total_write);
> +		vdpasim_net_complete(rxq, write);
>
>  		if (++pkts > 4) {
>  			schedule_work(&vdpasim->work);
> --
> 2.25.0
>
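[Editor's note] For context on the query above: the hunk as posted calls `vdpasim_complete()`, while the helper defined earlier in the file, and used at every other call site the patch converts, is `vdpasim_net_complete()`. A minimal sketch of the drop path with that rename applied, assuming `vdpasim_net_complete()` is indeed the intended helper; this is a fragment of the TX loop body, not the applied patch:

```c
/* Sketch only: loop body once a frame fails the unicast filter.
 * Assumes vdpasim_net_complete() is the intended completion helper,
 * per the review comment above.
 */
read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
			     vdpasim->buffer, PAGE_SIZE);

if (!receive_filter(vdpasim, read)) {
	/* Drop the frame: retire the TX descriptor without touching RX. */
	vdpasim_net_complete(txq, 0);
	continue;
}
```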
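[Editor's note] As background on the offsets `receive_filter()` relies on: each buffer pulled from the TX ring starts with a virtio-net header (`struct virtio_net_hdr_v1` when `VIRTIO_F_VERSION_1` is negotiated, the legacy `struct virtio_net_hdr` otherwise), followed by the Ethernet frame, whose first six bytes are the destination MAC. A self-contained illustration of the same check; the struct stand-ins and the `dest_mac_matches()` helper are invented for this example, the real headers live in `<linux/virtio_net.h>`:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* bytes in an Ethernet MAC address */

/* Stand-ins with the on-the-wire sizes of the kernel structs:
 * the legacy header is 10 bytes; the v1 header adds num_buffers. */
struct virtio_net_hdr    { unsigned char raw[10]; };
struct virtio_net_hdr_v1 { unsigned char raw[12]; };

/* Hypothetical helper mirroring receive_filter(): the destination MAC
 * of the Ethernet frame begins right after the virtio-net header. */
static bool dest_mac_matches(const unsigned char *buf, size_t len,
			     bool modern, const unsigned char *mac)
{
	size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1)
				: sizeof(struct virtio_net_hdr);

	if (len < hdr_len + ETH_ALEN)
		return false;	/* too short to contain a destination MAC */

	return memcmp(buf + hdr_len, mac, ETH_ALEN) == 0;
}

int main(void)
{
	/* v1 header (12 bytes) immediately followed by the dest MAC. */
	unsigned char frame[12 + ETH_ALEN] = { 0 };
	const unsigned char mac[ETH_ALEN] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

	memcpy(frame + 12, mac, ETH_ALEN);
	printf("match: %d\n", dest_mac_matches(frame, sizeof(frame), true, mac));
	return 0;
}
```

The sketch uses `memcmp()` because a MAC address is binary data; the patch's `strncmp()` gives the same answer for matching addresses but treats a 0x00 byte as a string terminator.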