diff mbox series

[3/3] virtio-net: implement RX RSS processing

Message ID 20200226174809.9675-4-yuri.benditovich@daynix.com (mailing list archive)
State New, archived
Headers show
Series reference implementation of RSS | expand

Commit Message

Yuri Benditovich Feb. 26, 2020, 5:48 p.m. UTC
If VIRTIO_NET_F_RSS is negotiated and RSS is enabled, process
incoming packets, calculate each packet's hash and place the
packet into the respective RX virtqueue.

Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
---
 hw/net/virtio-net.c            | 86 +++++++++++++++++++++++++++++++++-
 include/hw/virtio/virtio-net.h |  1 +
 2 files changed, 85 insertions(+), 2 deletions(-)

Comments

Michael S. Tsirkin March 5, 2020, 1:20 p.m. UTC | #1
On Wed, Feb 26, 2020 at 07:48:09PM +0200, Yuri Benditovich wrote:
> If VIRTIO_NET_F_RSS negotiated and RSS is enabled, process
> incoming packets, calculate packet's hash and place the
> packet into respective RX virtqueue.
> 
> Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> ---
>  hw/net/virtio-net.c            | 86 +++++++++++++++++++++++++++++++++-
>  include/hw/virtio/virtio-net.h |  1 +
>  2 files changed, 85 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index c5d21675a9..adf7b88d7a 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -42,6 +42,7 @@
>  #include "trace.h"
>  #include "monitor/qdev.h"
>  #include "hw/pci/pci.h"
> +#include "net_rx_pkt.h"
>  
>  #define VIRTIO_NET_VM_VERSION    11
>  
> @@ -1515,8 +1516,78 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
>      return 0;
>  }
>  
> +static uint8_t virtio_net_get_hash_type(bool isip4,
> +                                        bool isip6,
> +                                        bool isudp,
> +                                        bool istcp,
> +                                        uint32_t types)
> +{
> +    uint32_t mask;
> +    if (isip4) {
> +        if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
> +            return NetPktRssIpV4Tcp;
> +        }
> +        if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
> +            return NetPktRssIpV4Udp;
> +        }
> +        if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
> +            return NetPktRssIpV4;
> +        }
> +    } else if (isip6) {
> +        mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
> +        if (istcp && (types & mask)) {
> +            return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
> +                NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
> +        }
> +        mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
> +        if (isudp && (types & mask)) {
> +            return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
> +                NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
> +        }
> +        mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
> +        if (types & mask) {
> +            return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
> +                NetPktRssIpV6Ex : NetPktRssIpV6;


BTW we really need to fix up hw/net/net_rx_pkt.h to match qemu
coding style.
Could you do it pls?

> +        }
> +    }
> +    return 0xff;
> +}
> +
> +static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> +                                  size_t size)
> +{
> +    VirtIONet *n = qemu_get_nic_opaque(nc);
> +    unsigned int index = nc->queue_index, new_index;
> +    struct NetRxPkt *pkt = n->rss_data.pkt;
> +    uint8_t net_hash_type;
> +    uint32_t hash;
> +    bool isip4, isip6, isudp, istcp;
> +    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
> +                             size - n->host_hdr_len);
> +    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
> +    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
> +        istcp = isudp = false;
> +    }
> +    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
> +        istcp = isudp = false;
> +    }
> +    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
> +                                             n->rss_data.hash_types);
> +    if (net_hash_type > NetPktRssIpV6UdpEx) {
> +        return n->rss_data.default_queue;
> +    }
> +
> +    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
> +    new_index = hash & (n->rss_data.indirections_len - 1);
> +    new_index = n->rss_data.indirections[new_index];
> +    if (index == new_index) {
> +        return -1;
> +    }
> +    return new_index;
> +}
> +
>  static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> -                                      size_t size)
> +                                      size_t size, bool no_rss)
>  {
>      VirtIONet *n = qemu_get_nic_opaque(nc);
>      VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> @@ -1530,6 +1601,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
>          return -1;
>      }
>  
> +    if (!no_rss && n->rss_data.enabled) {
> +        int index = virtio_net_process_rss(nc, buf, size);
> +        if (index >= 0) {
> +            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
> +            return virtio_net_receive_rcu(nc2, buf, size, true);
> +        }
> +    }
> +
>      /* hdr_len refers to the header we supply to the guest */
>      if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
>          return 0;
> @@ -1624,7 +1703,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
>  {
>      RCU_READ_LOCK_GUARD();
>  
> -    return virtio_net_receive_rcu(nc, buf, size);
> +    return virtio_net_receive_rcu(nc, buf, size, false);
>  }
>  
>  static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
> @@ -3200,6 +3279,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>  
>      QTAILQ_INIT(&n->rsc_chains);
>      n->qdev = dev;
> +
> +    net_rx_pkt_init(&n->rss_data.pkt, false);
>  }
>  
>  static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> @@ -3236,6 +3317,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
>      g_free(n->vqs);
>      qemu_del_nic(n->nic);
>      virtio_net_rsc_cleanup(n);
> +    net_rx_pkt_uninit(n->rss_data.pkt);
>      virtio_cleanup(vdev);
>  }
>  
> diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> index cf16f5192e..45670dd054 100644
> --- a/include/hw/virtio/virtio-net.h
> +++ b/include/hw/virtio/virtio-net.h
> @@ -209,6 +209,7 @@ struct VirtIONet {
>          uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
>          uint16_t indirections_len;
>          uint16_t default_queue;
> +        struct NetRxPkt *pkt;
>      } rss_data;
>  };
>  
> -- 
> 2.17.1
Yuri Benditovich March 5, 2020, 7:54 p.m. UTC | #2
On Thu, Mar 5, 2020 at 3:20 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Wed, Feb 26, 2020 at 07:48:09PM +0200, Yuri Benditovich wrote:
> > If VIRTIO_NET_F_RSS negotiated and RSS is enabled, process
> > incoming packets, calculate packet's hash and place the
> > packet into respective RX virtqueue.
> >
> > Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> > ---
> >  hw/net/virtio-net.c            | 86 +++++++++++++++++++++++++++++++++-
> >  include/hw/virtio/virtio-net.h |  1 +
> >  2 files changed, 85 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index c5d21675a9..adf7b88d7a 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -42,6 +42,7 @@
> >  #include "trace.h"
> >  #include "monitor/qdev.h"
> >  #include "hw/pci/pci.h"
> > +#include "net_rx_pkt.h"
> >
> >  #define VIRTIO_NET_VM_VERSION    11
> >
> > @@ -1515,8 +1516,78 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> >      return 0;
> >  }
> >
> > +static uint8_t virtio_net_get_hash_type(bool isip4,
> > +                                        bool isip6,
> > +                                        bool isudp,
> > +                                        bool istcp,
> > +                                        uint32_t types)
> > +{
> > +    uint32_t mask;
> > +    if (isip4) {
> > +        if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
> > +            return NetPktRssIpV4Tcp;
> > +        }
> > +        if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
> > +            return NetPktRssIpV4Udp;
> > +        }
> > +        if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
> > +            return NetPktRssIpV4;
> > +        }
> > +    } else if (isip6) {
> > +        mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
> > +        if (istcp && (types & mask)) {
> > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
> > +                NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
> > +        }
> > +        mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
> > +        if (isudp && (types & mask)) {
> > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
> > +                NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
> > +        }
> > +        mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
> > +        if (types & mask) {
> > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
> > +                NetPktRssIpV6Ex : NetPktRssIpV6;
>
>
> BTW we really need to fix up hw/net/net_rx_pkt.h to match qemu
> coding style.
> Could you do it pls?
>

Can you please point out the exact style problem in net_rx_pkt.h?

> > +        }
> > +    }
> > +    return 0xff;
> > +}
> > +
> > +static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> > +                                  size_t size)
> > +{
> > +    VirtIONet *n = qemu_get_nic_opaque(nc);
> > +    unsigned int index = nc->queue_index, new_index;
> > +    struct NetRxPkt *pkt = n->rss_data.pkt;
> > +    uint8_t net_hash_type;
> > +    uint32_t hash;
> > +    bool isip4, isip6, isudp, istcp;
> > +    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
> > +                             size - n->host_hdr_len);
> > +    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
> > +    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
> > +        istcp = isudp = false;
> > +    }
> > +    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
> > +        istcp = isudp = false;
> > +    }
> > +    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
> > +                                             n->rss_data.hash_types);
> > +    if (net_hash_type > NetPktRssIpV6UdpEx) {
> > +        return n->rss_data.default_queue;
> > +    }
> > +
> > +    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
> > +    new_index = hash & (n->rss_data.indirections_len - 1);
> > +    new_index = n->rss_data.indirections[new_index];
> > +    if (index == new_index) {
> > +        return -1;
> > +    }
> > +    return new_index;
> > +}
> > +
> >  static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > -                                      size_t size)
> > +                                      size_t size, bool no_rss)
> >  {
> >      VirtIONet *n = qemu_get_nic_opaque(nc);
> >      VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> > @@ -1530,6 +1601,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> >          return -1;
> >      }
> >
> > +    if (!no_rss && n->rss_data.enabled) {
> > +        int index = virtio_net_process_rss(nc, buf, size);
> > +        if (index >= 0) {
> > +            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
> > +            return virtio_net_receive_rcu(nc2, buf, size, true);
> > +        }
> > +    }
> > +
> >      /* hdr_len refers to the header we supply to the guest */
> >      if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
> >          return 0;
> > @@ -1624,7 +1703,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
> >  {
> >      RCU_READ_LOCK_GUARD();
> >
> > -    return virtio_net_receive_rcu(nc, buf, size);
> > +    return virtio_net_receive_rcu(nc, buf, size, false);
> >  }
> >
> >  static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
> > @@ -3200,6 +3279,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >
> >      QTAILQ_INIT(&n->rsc_chains);
> >      n->qdev = dev;
> > +
> > +    net_rx_pkt_init(&n->rss_data.pkt, false);
> >  }
> >
> >  static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > @@ -3236,6 +3317,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> >      g_free(n->vqs);
> >      qemu_del_nic(n->nic);
> >      virtio_net_rsc_cleanup(n);
> > +    net_rx_pkt_uninit(n->rss_data.pkt);
> >      virtio_cleanup(vdev);
> >  }
> >
> > diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> > index cf16f5192e..45670dd054 100644
> > --- a/include/hw/virtio/virtio-net.h
> > +++ b/include/hw/virtio/virtio-net.h
> > @@ -209,6 +209,7 @@ struct VirtIONet {
> >          uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
> >          uint16_t indirections_len;
> >          uint16_t default_queue;
> > +        struct NetRxPkt *pkt;
> >      } rss_data;
> >  };
> >
> > --
> > 2.17.1
>
Michael S. Tsirkin March 5, 2020, 8:02 p.m. UTC | #3
On Thu, Mar 05, 2020 at 09:54:31PM +0200, Yuri Benditovich wrote:
> On Thu, Mar 5, 2020 at 3:20 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Wed, Feb 26, 2020 at 07:48:09PM +0200, Yuri Benditovich wrote:
> > > If VIRTIO_NET_F_RSS negotiated and RSS is enabled, process
> > > incoming packets, calculate packet's hash and place the
> > > packet into respective RX virtqueue.
> > >
> > > Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> > > ---
> > >  hw/net/virtio-net.c            | 86 +++++++++++++++++++++++++++++++++-
> > >  include/hw/virtio/virtio-net.h |  1 +
> > >  2 files changed, 85 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > index c5d21675a9..adf7b88d7a 100644
> > > --- a/hw/net/virtio-net.c
> > > +++ b/hw/net/virtio-net.c
> > > @@ -42,6 +42,7 @@
> > >  #include "trace.h"
> > >  #include "monitor/qdev.h"
> > >  #include "hw/pci/pci.h"
> > > +#include "net_rx_pkt.h"
> > >
> > >  #define VIRTIO_NET_VM_VERSION    11
> > >
> > > @@ -1515,8 +1516,78 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> > >      return 0;
> > >  }
> > >
> > > +static uint8_t virtio_net_get_hash_type(bool isip4,
> > > +                                        bool isip6,
> > > +                                        bool isudp,
> > > +                                        bool istcp,
> > > +                                        uint32_t types)
> > > +{
> > > +    uint32_t mask;
> > > +    if (isip4) {
> > > +        if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
> > > +            return NetPktRssIpV4Tcp;
> > > +        }
> > > +        if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
> > > +            return NetPktRssIpV4Udp;
> > > +        }
> > > +        if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
> > > +            return NetPktRssIpV4;
> > > +        }
> > > +    } else if (isip6) {
> > > +        mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
> > > +        if (istcp && (types & mask)) {
> > > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
> > > +                NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
> > > +        }
> > > +        mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
> > > +        if (isudp && (types & mask)) {
> > > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
> > > +                NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
> > > +        }
> > > +        mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
> > > +        if (types & mask) {
> > > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
> > > +                NetPktRssIpV6Ex : NetPktRssIpV6;
> >
> >
> > BTW we really need to fix up hw/net/net_rx_pkt.h to match qemu
> > coding style.
> > Could you do it pls?
> >
> 
> Can you please point on exact style problem in net_rx_pkt.h?

Two issues that I noticed:

- Use of "struct" instead of a typedef with struct names.
- Mixed case for enum values instead of upper case.



> > > +        }
> > > +    }
> > > +    return 0xff;
> > > +}
> > > +
> > > +static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> > > +                                  size_t size)
> > > +{
> > > +    VirtIONet *n = qemu_get_nic_opaque(nc);
> > > +    unsigned int index = nc->queue_index, new_index;
> > > +    struct NetRxPkt *pkt = n->rss_data.pkt;
> > > +    uint8_t net_hash_type;
> > > +    uint32_t hash;
> > > +    bool isip4, isip6, isudp, istcp;
> > > +    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
> > > +                             size - n->host_hdr_len);
> > > +    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
> > > +    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
> > > +        istcp = isudp = false;
> > > +    }
> > > +    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
> > > +        istcp = isudp = false;
> > > +    }
> > > +    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
> > > +                                             n->rss_data.hash_types);
> > > +    if (net_hash_type > NetPktRssIpV6UdpEx) {
> > > +        return n->rss_data.default_queue;
> > > +    }
> > > +
> > > +    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
> > > +    new_index = hash & (n->rss_data.indirections_len - 1);
> > > +    new_index = n->rss_data.indirections[new_index];
> > > +    if (index == new_index) {
> > > +        return -1;
> > > +    }
> > > +    return new_index;
> > > +}
> > > +
> > >  static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > > -                                      size_t size)
> > > +                                      size_t size, bool no_rss)
> > >  {
> > >      VirtIONet *n = qemu_get_nic_opaque(nc);
> > >      VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> > > @@ -1530,6 +1601,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > >          return -1;
> > >      }
> > >
> > > +    if (!no_rss && n->rss_data.enabled) {
> > > +        int index = virtio_net_process_rss(nc, buf, size);
> > > +        if (index >= 0) {
> > > +            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
> > > +            return virtio_net_receive_rcu(nc2, buf, size, true);
> > > +        }
> > > +    }
> > > +
> > >      /* hdr_len refers to the header we supply to the guest */
> > >      if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
> > >          return 0;
> > > @@ -1624,7 +1703,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
> > >  {
> > >      RCU_READ_LOCK_GUARD();
> > >
> > > -    return virtio_net_receive_rcu(nc, buf, size);
> > > +    return virtio_net_receive_rcu(nc, buf, size, false);
> > >  }
> > >
> > >  static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
> > > @@ -3200,6 +3279,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > >
> > >      QTAILQ_INIT(&n->rsc_chains);
> > >      n->qdev = dev;
> > > +
> > > +    net_rx_pkt_init(&n->rss_data.pkt, false);
> > >  }
> > >
> > >  static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > > @@ -3236,6 +3317,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > >      g_free(n->vqs);
> > >      qemu_del_nic(n->nic);
> > >      virtio_net_rsc_cleanup(n);
> > > +    net_rx_pkt_uninit(n->rss_data.pkt);
> > >      virtio_cleanup(vdev);
> > >  }
> > >
> > > diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> > > index cf16f5192e..45670dd054 100644
> > > --- a/include/hw/virtio/virtio-net.h
> > > +++ b/include/hw/virtio/virtio-net.h
> > > @@ -209,6 +209,7 @@ struct VirtIONet {
> > >          uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
> > >          uint16_t indirections_len;
> > >          uint16_t default_queue;
> > > +        struct NetRxPkt *pkt;
> > >      } rss_data;
> > >  };
> > >
> > > --
> > > 2.17.1
> >
Yuri Benditovich March 5, 2020, 9:04 p.m. UTC | #4
On Thu, Mar 5, 2020 at 10:02 PM Michael S. Tsirkin <mst@redhat.com> wrote:

> On Thu, Mar 05, 2020 at 09:54:31PM +0200, Yuri Benditovich wrote:
> > On Thu, Mar 5, 2020 at 3:20 PM Michael S. Tsirkin <mst@redhat.com>
> wrote:
> > >
> > > On Wed, Feb 26, 2020 at 07:48:09PM +0200, Yuri Benditovich wrote:
> > > > If VIRTIO_NET_F_RSS negotiated and RSS is enabled, process
> > > > incoming packets, calculate packet's hash and place the
> > > > packet into respective RX virtqueue.
> > > >
> > > > Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> > > > ---
> > > >  hw/net/virtio-net.c            | 86
> +++++++++++++++++++++++++++++++++-
> > > >  include/hw/virtio/virtio-net.h |  1 +
> > > >  2 files changed, 85 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > > index c5d21675a9..adf7b88d7a 100644
> > > > --- a/hw/net/virtio-net.c
> > > > +++ b/hw/net/virtio-net.c
> > > > @@ -42,6 +42,7 @@
> > > >  #include "trace.h"
> > > >  #include "monitor/qdev.h"
> > > >  #include "hw/pci/pci.h"
> > > > +#include "net_rx_pkt.h"
> > > >
> > > >  #define VIRTIO_NET_VM_VERSION    11
> > > >
> > > > @@ -1515,8 +1516,78 @@ static int receive_filter(VirtIONet *n, const
> uint8_t *buf, int size)
> > > >      return 0;
> > > >  }
> > > >
> > > > +static uint8_t virtio_net_get_hash_type(bool isip4,
> > > > +                                        bool isip6,
> > > > +                                        bool isudp,
> > > > +                                        bool istcp,
> > > > +                                        uint32_t types)
> > > > +{
> > > > +    uint32_t mask;
> > > > +    if (isip4) {
> > > > +        if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
> > > > +            return NetPktRssIpV4Tcp;
> > > > +        }
> > > > +        if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
> > > > +            return NetPktRssIpV4Udp;
> > > > +        }
> > > > +        if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
> > > > +            return NetPktRssIpV4;
> > > > +        }
> > > > +    } else if (isip6) {
> > > > +        mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
> VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
> > > > +        if (istcp && (types & mask)) {
> > > > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
> > > > +                NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
> > > > +        }
> > > > +        mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX |
> VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
> > > > +        if (isudp && (types & mask)) {
> > > > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
> > > > +                NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
> > > > +        }
> > > > +        mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
> VIRTIO_NET_RSS_HASH_TYPE_IPv6;
> > > > +        if (types & mask) {
> > > > +            return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
> > > > +                NetPktRssIpV6Ex : NetPktRssIpV6;
> > >
> > >
> > > BTW we really need to fix up hw/net/net_rx_pkt.h to match qemu
> > > coding style.
> > > Could you do it pls?
> > >
> >
> > Can you please point on exact style problem in net_rx_pkt.h?
>
> Two issues that I noticed:
>
> - Use of "struct" instead of a typedef with struct names.
> - Mixed case for enum values instead of upper case.
>
>
I will discuss it with the maintainer of net_rx_pkt.
Note that using struct tags without a typedef is explicitly allowed by the
QEMU coding style doc.
Note also that the doc actually recommends CamelCase for enum type names, and
both uppercase and camel case are widely used for enum values in the code.


>
>
> > > > +        }
> > > > +    }
> > > > +    return 0xff;
> > > > +}
> > > > +
> > > > +static int virtio_net_process_rss(NetClientState *nc, const uint8_t
> *buf,
> > > > +                                  size_t size)
> > > > +{
> > > > +    VirtIONet *n = qemu_get_nic_opaque(nc);
> > > > +    unsigned int index = nc->queue_index, new_index;
> > > > +    struct NetRxPkt *pkt = n->rss_data.pkt;
> > > > +    uint8_t net_hash_type;
> > > > +    uint32_t hash;
> > > > +    bool isip4, isip6, isudp, istcp;
> > > > +    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
> > > > +                             size - n->host_hdr_len);
> > > > +    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
> > > > +    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
> > > > +        istcp = isudp = false;
> > > > +    }
> > > > +    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
> > > > +        istcp = isudp = false;
> > > > +    }
> > > > +    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp,
> istcp,
> > > > +
>  n->rss_data.hash_types);
> > > > +    if (net_hash_type > NetPktRssIpV6UdpEx) {
> > > > +        return n->rss_data.default_queue;
> > > > +    }
> > > > +
> > > > +    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type,
> n->rss_data.key);
> > > > +    new_index = hash & (n->rss_data.indirections_len - 1);
> > > > +    new_index = n->rss_data.indirections[new_index];
> > > > +    if (index == new_index) {
> > > > +        return -1;
> > > > +    }
> > > > +    return new_index;
> > > > +}
> > > > +
> > > >  static ssize_t virtio_net_receive_rcu(NetClientState *nc, const
> uint8_t *buf,
> > > > -                                      size_t size)
> > > > +                                      size_t size, bool no_rss)
> > > >  {
> > > >      VirtIONet *n = qemu_get_nic_opaque(nc);
> > > >      VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> > > > @@ -1530,6 +1601,14 @@ static ssize_t
> virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > > >          return -1;
> > > >      }
> > > >
> > > > +    if (!no_rss && n->rss_data.enabled) {
> > > > +        int index = virtio_net_process_rss(nc, buf, size);
> > > > +        if (index >= 0) {
> > > > +            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
> > > > +            return virtio_net_receive_rcu(nc2, buf, size, true);
> > > > +        }
> > > > +    }
> > > > +
> > > >      /* hdr_len refers to the header we supply to the guest */
> > > >      if (!virtio_net_has_buffers(q, size + n->guest_hdr_len -
> n->host_hdr_len)) {
> > > >          return 0;
> > > > @@ -1624,7 +1703,7 @@ static ssize_t
> virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
> > > >  {
> > > >      RCU_READ_LOCK_GUARD();
> > > >
> > > > -    return virtio_net_receive_rcu(nc, buf, size);
> > > > +    return virtio_net_receive_rcu(nc, buf, size, false);
> > > >  }
> > > >
> > > >  static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
> > > > @@ -3200,6 +3279,8 @@ static void
> virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > >
> > > >      QTAILQ_INIT(&n->rsc_chains);
> > > >      n->qdev = dev;
> > > > +
> > > > +    net_rx_pkt_init(&n->rss_data.pkt, false);
> > > >  }
> > > >
> > > >  static void virtio_net_device_unrealize(DeviceState *dev, Error
> **errp)
> > > > @@ -3236,6 +3317,7 @@ static void
> virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > > >      g_free(n->vqs);
> > > >      qemu_del_nic(n->nic);
> > > >      virtio_net_rsc_cleanup(n);
> > > > +    net_rx_pkt_uninit(n->rss_data.pkt);
> > > >      virtio_cleanup(vdev);
> > > >  }
> > > >
> > > > diff --git a/include/hw/virtio/virtio-net.h
> b/include/hw/virtio/virtio-net.h
> > > > index cf16f5192e..45670dd054 100644
> > > > --- a/include/hw/virtio/virtio-net.h
> > > > +++ b/include/hw/virtio/virtio-net.h
> > > > @@ -209,6 +209,7 @@ struct VirtIONet {
> > > >          uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
> > > >          uint16_t indirections_len;
> > > >          uint16_t default_queue;
> > > > +        struct NetRxPkt *pkt;
> > > >      } rss_data;
> > > >  };
> > > >
> > > > --
> > > > 2.17.1
> > >
>
>
diff mbox series

Patch

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index c5d21675a9..adf7b88d7a 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -42,6 +42,7 @@ 
 #include "trace.h"
 #include "monitor/qdev.h"
 #include "hw/pci/pci.h"
+#include "net_rx_pkt.h"
 
 #define VIRTIO_NET_VM_VERSION    11
 
@@ -1515,8 +1516,78 @@  static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
     return 0;
 }
 
+static uint8_t virtio_net_get_hash_type(bool isip4,
+                                        bool isip6,
+                                        bool isudp,
+                                        bool istcp,
+                                        uint32_t types)
+{
+    uint32_t mask;
+    if (isip4) {
+        if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
+            return NetPktRssIpV4Tcp;
+        }
+        if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
+            return NetPktRssIpV4Udp;
+        }
+        if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+            return NetPktRssIpV4;
+        }
+    } else if (isip6) {
+        mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
+        if (istcp && (types & mask)) {
+            return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
+                NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
+        }
+        mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
+        if (isudp && (types & mask)) {
+            return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
+                NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
+        }
+        mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+        if (types & mask) {
+            return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
+                NetPktRssIpV6Ex : NetPktRssIpV6;
+        }
+    }
+    return 0xff;
+}
+
+static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    VirtIONet *n = qemu_get_nic_opaque(nc);
+    unsigned int index = nc->queue_index, new_index;
+    struct NetRxPkt *pkt = n->rss_data.pkt;
+    uint8_t net_hash_type;
+    uint32_t hash;
+    bool isip4, isip6, isudp, istcp;
+    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
+                             size - n->host_hdr_len);
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
+        istcp = isudp = false;
+    }
+    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
+        istcp = isudp = false;
+    }
+    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
+                                             n->rss_data.hash_types);
+    if (net_hash_type > NetPktRssIpV6UdpEx) {
+        return n->rss_data.default_queue;
+    }
+
+    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
+    new_index = hash & (n->rss_data.indirections_len - 1);
+    new_index = n->rss_data.indirections[new_index];
+    if (index == new_index) {
+        return -1;
+    }
+    return new_index;
+}
+
 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
-                                      size_t size)
+                                      size_t size, bool no_rss)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@@ -1530,6 +1601,14 @@  static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
         return -1;
     }
 
+    if (!no_rss && n->rss_data.enabled) {
+        int index = virtio_net_process_rss(nc, buf, size);
+        if (index >= 0) {
+            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
+            return virtio_net_receive_rcu(nc2, buf, size, true);
+        }
+    }
+
     /* hdr_len refers to the header we supply to the guest */
     if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
         return 0;
@@ -1624,7 +1703,7 @@  static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
 {
     RCU_READ_LOCK_GUARD();
 
-    return virtio_net_receive_rcu(nc, buf, size);
+    return virtio_net_receive_rcu(nc, buf, size, false);
 }
 
 static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
@@ -3200,6 +3279,8 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
 
     QTAILQ_INIT(&n->rsc_chains);
     n->qdev = dev;
+
+    net_rx_pkt_init(&n->rss_data.pkt, false);
 }
 
 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
@@ -3236,6 +3317,7 @@  static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     g_free(n->vqs);
     qemu_del_nic(n->nic);
     virtio_net_rsc_cleanup(n);
+    net_rx_pkt_uninit(n->rss_data.pkt);
     virtio_cleanup(vdev);
 }
 
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index cf16f5192e..45670dd054 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -209,6 +209,7 @@  struct VirtIONet {
         uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
         uint16_t indirections_len;
         uint16_t default_queue;
+        struct NetRxPkt *pkt;
     } rss_data;
 };