[2/3] vdpa: load vlan configuration at NIC startup

Message ID 20220906163621.1144675-3-eperezma@redhat.com
State New, archived
Series Vhost-vdpa Shadow Virtqueue VLAN support

Commit Message

Eugenio Perez Martin Sept. 6, 2022, 4:36 p.m. UTC
VLANs may already be enabled when the device starts, e.g. on the
destination of a live migration, so this configuration must be restored.

At the moment this code is unreachable, since SVQ refuses to start if the
VLAN feature is exposed by the device.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 46 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)
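
For context, the patch replays one VIRTIO_NET_CTRL_VLAN_ADD command per set
bit of VirtIONet's 4096-bit vlan bitmap. A minimal sketch of the indexing it
uses (vid = (i << 5) + j), assuming the n->vlans layout of 128 32-bit words
from the patch:

/* vlan id 100 lives in word 100 >> 5 = 3, at bit 100 & 0x1f = 4 */
uint16_t vid = 100;
bool filtered_in = n->vlans[vid >> 5] & (1U << (vid & 0x1f));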

Comments

Jason Wang Sept. 9, 2022, 6:38 a.m. UTC | #1
On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> VLANs may already be enabled when the device starts, e.g. on the
> destination of a live migration, so this configuration must be restored.
>
> At the moment this code is unreachable, since SVQ refuses to start if the
> VLAN feature is exposed by the device.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>  net/vhost-vdpa.c | 46 ++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 44 insertions(+), 2 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 4bc3fd01a8..ecbfd08eb9 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -100,6 +100,8 @@ static const uint64_t vdpa_svq_device_features =
>      BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
>      BIT_ULL(VIRTIO_NET_F_STANDBY);
>
> +#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
> +
>  VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
>  {
>      VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> @@ -423,6 +425,47 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
>      return *s->status != VIRTIO_NET_OK;
>  }
>
> +static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
> +                                           const VirtIONet *n,
> +                                           uint16_t vid)
> +{
> +    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
> +                                                  VIRTIO_NET_CTRL_VLAN_ADD,
> +                                                  &vid, sizeof(vid));
> +    if (unlikely(dev_written < 0)) {
> +        return dev_written;
> +    }
> +
> +    if (unlikely(*s->status != VIRTIO_NET_OK)) {
> +        return -EINVAL;
> +    }
> +
> +    return 0;
> +}
> +
> +static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
> +                                    const VirtIONet *n)
> +{
> +    uint64_t features = n->parent_obj.guest_features;
> +
> +    if (!(features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) {
> +        return 0;
> +    }
> +
> +    for (int i = 0; i < MAX_VLAN >> 5; i++) {
> +        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
> +            if (n->vlans[i] & (1U << j)) {
> +                int r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);

This seems to cause a lot of latency if the driver has a lot of vlans.

I wonder if it's simpler to just let all vlan traffic pass by disabling
the CTRL_VLAN feature at the vDPA layer.

Thanks

> +                if (unlikely(r != 0)) {
> +                    return r;
> +                }
> +            }
> +        }
> +    }
> +
> +    return 0;
> +}
> +
>  static int vhost_vdpa_net_load(NetClientState *nc)
>  {
>      VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> @@ -445,8 +488,7 @@ static int vhost_vdpa_net_load(NetClientState *nc)
>      if (unlikely(r)) {
>          return r;
>      }
> -
> -    return 0;
> +    return vhost_vdpa_net_load_vlan(s, n);
>  }
>
>  static NetClientInfo net_vhost_vdpa_cvq_info = {
> --
> 2.31.1
>
Jason Wang Sept. 9, 2022, 6:40 a.m. UTC | #2
On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >
> > [...]
>
> This seems to cause a lot of latency if the driver has a lot of vlans.
>
> I wonder if it's simpler to just let all vlan traffic pass by disabling
> the CTRL_VLAN feature at the vDPA layer.

Another idea is to extend the spec to allow us to accept a bitmap of
the vlan ids via a single command, then we will be fine.
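
For illustration, such a command could carry the whole filter table in one
payload. This is only a sketch; the sub-command name and layout here are
assumptions, not part of the virtio spec today:

/* Hypothetical one-shot vlan filter command (assumed, not in the spec). */
#define VIRTIO_NET_CTRL_VLAN_SET_MAP  2   /* assumed new sub-command */

struct virtio_net_ctrl_vlan_map {
    le32 vlans[128];   /* 4096-bit map; bit N set => vlan N passes */
};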

Thanks

Eugenio Perez Martin Sept. 9, 2022, 8:01 a.m. UTC | #3
On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > >
> > > [...]
> >
> > This seems to cause a lot of latency if the driver has a lot of vlans.
> >
> > I wonder if it's simpler to just let all vlan traffic pass by disabling
> > the CTRL_VLAN feature at the vDPA layer.
>

The guest will not be able to recover that vlan configuration.

> Another idea is to extend the spec to allow us to accept a bitmap of
> the vlan ids via a single command, then we will be fine.
>

I'm not sure if adding more ways to configure something is the answer,
but I'd be ok to implement it.

Another idea is to allow sending many CVQ commands in parallel. It
shouldn't be very hard to achieve using the exposed buffers as ring
buffers, and it would shorten the startup of devices with many
features.
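
A rough sketch of that flow follows; the helper names (svq_submit,
svq_poll_one) are made up for illustration, since today's code waits for
each command synchronously:

/* Keep the ring full of in-flight CVQ commands, reclaiming as we go. */
size_t in_flight = 0;
while (have_more_commands()) {
    if (in_flight == ring_size) {
        svq_poll_one();           /* wait for the oldest completion */
        in_flight--;
    }
    svq_submit(next_command());   /* post a command without waiting */
    in_flight++;
}
while (in_flight > 0) {
    svq_poll_one();               /* drain the remaining completions */
    in_flight--;
}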

Thanks!

Michael S. Tsirkin Sept. 9, 2022, 8:38 a.m. UTC | #4
On Fri, Sep 09, 2022 at 10:01:16AM +0200, Eugenio Perez Martin wrote:
> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > >
> > > > [...]
> > >
> > > This seems to cause a lot of latency if the driver has a lot of vlans.
> > >
> > > I wonder if it's simpler to just let all vlan traffic pass by disabling
> > > the CTRL_VLAN feature at the vDPA layer.
> >
> 
> The guest will not be able to recover that vlan configuration.
> 
> > Another idea is to extend the spec to allow us to accept a bitmap of
> > the vlan ids via a single command, then we will be fine.
> >
> 
> I'm not sure if adding more ways to configure something is the answer,
> but I'd be ok to implement it.
> 
> > Another idea is to allow sending many CVQ commands in parallel. It
> > shouldn't be very hard to achieve using the exposed buffers as ring
> > buffers, and it would shorten the startup of devices with many
> > features.

This would seem like a reasonable second step.  A first step would be to
measure the overhead to make sure there's actually a need to optimize
this.
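
A minimal way to take that measurement, as a sketch (instrumentation only,
not part of the patch; g_get_monotonic_time() returns microseconds):

int64_t start_us = g_get_monotonic_time();
int r = vhost_vdpa_net_load_vlan(s, n);
qemu_log("vlan restore took %" PRId64 " us (r=%d)\n",
         g_get_monotonic_time() - start_us, r);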


Jason Wang Sept. 14, 2022, 2:20 a.m. UTC | #5
On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>
> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > >
> > > > [...]
> > >
> > > This seems to cause a lot of latency if the driver has a lot of vlans.
> > >
> > > I wonder if it's simpler to just let all vlan traffic pass by disabling
> > > the CTRL_VLAN feature at the vDPA layer.
> >
>
> The guest will not be able to recover that vlan configuration.

The spec says it's best effort, and we actually haven't done this for
vhost-net/user for years and managed to survive.

>
> > Another idea is to extend the spec to allow us to accept a bitmap of
> > the vlan ids via a single command, then we will be fine.
> >
>
> I'm not sure if adding more ways to configure something is the answer,
> but I'd be ok to implement it.
>
> Another idea is to allow sending many CVQ commands in parallel. It
> shouldn't be very hard to achieve using the exposed buffers as ring
> buffers, and it would shorten the startup of devices with many
> features.

In the extreme case, what if the guest decides to filter all of the vlans?
It means we need MAX_VLAN commands, which may exceed the size of the
control virtqueue.
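
To put numbers on it: filtering every vlan takes MAX_VLAN = 4096 ADD
commands, so with, say, a 256-entry control virtqueue (the queue size is an
assumption) the ring would have to be filled and drained 4096 / 256 = 16
times; the commands cannot simply all be posted at once.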

Thanks

Jason Wang Sept. 14, 2022, 2:22 a.m. UTC | #6
On Fri, Sep 9, 2022 at 4:38 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Fri, Sep 09, 2022 at 10:01:16AM +0200, Eugenio Perez Martin wrote:
> > [...]
> >
> > Another idea is to allow sending many CVQ commands in parallel. It
> > shouldn't be very hard to achieve using the exposed buffers as ring
> > buffers, and it would shorten the startup of devices with many
> > features.
>
> This would seem like a reasonable second step.  A first step would be to
> measure the overhead to make sure there's actually a need to optimize
> this.

+1.

Thanks

Eugenio Perez Martin Sept. 14, 2022, 11:01 a.m. UTC | #7
On Wed, Sep 14, 2022 at 4:20 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
> >
> > On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > > >
> > > > > [...]
> > > >
> > > > This seems to cause a lot of latency if the driver has a lot of vlans.
> > > >
> > > > I wonder if it's simpler to just let all vlan traffic pass by disabling
> > > > the CTRL_VLAN feature at the vDPA layer.
> > >
> >
> > The guest will not be able to recover that vlan configuration.
>
> The spec says it's best effort, and we actually haven't done this for
> vhost-net/user for years and managed to survive.
>

If that is accepted, I'm ok to do it that way.

> >
> > > Another idea is to extend the spec to allow us to accept a bitmap of
> > > the vlan ids via a single command, then we will be fine.
> > >
> >
> > I'm not sure if adding more ways to configure something is the answer,
> > but I'd be ok to implement it.
> >
> > Another idea is to allow sending many CVQ commands in parallel. It
> > shouldn't be very hard to achieve using the exposed buffers as ring
> > buffers, and it would shorten the startup of devices with many
> > features.
>
> In the extreme case, what if the guest decides to filter all of the vlans?
> It means we need MAX_VLAN commands, which may exceed the size of the
> control virtqueue.
>

The buffers should be used in a circular manner: if not every vlan
command fits at once, we wait for previous buffers to complete and keep
sending.

This would speed up not only vlan initialization but all NIC
configuration loading.

Eugenio Perez Martin Sept. 14, 2022, 11:11 a.m. UTC | #8
On Fri, Sep 9, 2022 at 10:38 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Fri, Sep 09, 2022 at 10:01:16AM +0200, Eugenio Perez Martin wrote:
> > [...]
> >
> > Another idea is to allow sending many CVQ commands in parallel. It
> > shouldn't be very hard to achieve using the exposed buffers as ring
> > buffers, and it would shorten the startup of devices with many
> > features.
>
> This would seem like a reasonable second step.  A first step would be to
> measure the overhead to make sure there's actually a need to optimize
> this.
>

I totally agree.

The processing time of each command is limited by the device. Taking
this to the extreme, we could achieve almost instantaneous
configuration for vdpa_sim_net, but a new device could take forever to
configure each MAC.

So I think it should be considered an optimization on top too, applied
only if we see that initialization is slow on some device.

Thanks!
Si-Wei Liu Sept. 14, 2022, 11:32 a.m. UTC | #9
On 9/14/2022 3:20 AM, Jason Wang wrote:
> On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
>>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
>>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>>>>> [...]
>>>> This seems to cause a lot of latency if the driver has a lot of vlans.
>>>>
>>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
>>>> the CTRL_VLAN feature at the vDPA layer.
>> The guest will not be able to recover that vlan configuration.
> The spec says it's best effort, and we actually haven't done this for
> vhost-net/user for years and managed to survive.
I thought there's a line between best effort and doing nothing. ;-)

I think the reason this could survive is really software
implementation specific - say, if the backend doesn't start with VLAN
filtering disabled (meaning allow all VLAN traffic to pass), then it
would become a problem. This won't be a problem for most PF NICs, but
it may be problematic for vDPA, e.g. VF-backed vDPAs.
>
>>> Another idea is to extend the spec to allow us to accept a bitmap of
>>> the vlan ids via a single command, then we will be fine.
>>>
>> I'm not sure if adding more ways to configure something is the answer,
>> but I'd be ok to implement it.
>>
>> Another idea is to allow sending many CVQ commands in parallel. It
>> shouldn't be very hard to achieve using the exposed buffers as ring
>> buffers, and it would shorten the startup of devices with many
>> features.
> In the extreme case, what if the guest decides to filter all of the vlans?
> It means we need MAX_VLAN commands, which may exceed the size of the
> control virtqueue.
Indeed, that's a case where a single flat device state blob would be
more efficient for hardware drivers to apply than individual control
command messages.

-Siwei
Eugenio Perez Martin Sept. 14, 2022, 1:57 p.m. UTC | #10
On Wed, Sep 14, 2022 at 1:33 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
>
>
> On 9/14/2022 3:20 AM, Jason Wang wrote:
> > On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
> >> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> >>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> >>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >>>>> [...]
> >>>> This seems to cause a lot of latency if the driver has a lot of vlans.
> >>>>
> >>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
> >>>> the CTRL_VLAN feature at the vDPA layer.
> >> The guest will not be able to recover that vlan configuration.
> > The spec says it's best effort, and we actually haven't done this for
> > vhost-net/user for years and managed to survive.
> I thought there's a line between best effort and doing nothing. ;-)
>

I also think it is worth at least trying to migrate it. For
MAC there may be too many entries, but VLAN should be totally
manageable (and the information is already there: the bitmap is
actually being migrated).

> I think the reason this could survive is really software
> implementation specific - say, if the backend doesn't start with VLAN
> filtering disabled (meaning allow all VLAN traffic to pass), then it
> would become a problem. This won't be a problem for most PF NICs, but
> it may be problematic for vDPA, e.g. VF-backed vDPAs.

I'd say the driver would expect all vlan filters to be cleared after a
reset, wouldn't it? The spec doesn't explicitly say anything about that
as far as I see.

> >
> >>> Another idea is to extend the spec to allow us to accept a bitmap of
> >>> the vlan ids via a single command, then we will be fine.
> >>>
> >> I'm not sure if adding more ways to configure something is the answer,
> >> but I'd be ok to implement it.
> >>
> >> Another idea is to allow sending many CVQ commands in parallel. It
> >> shouldn't be very hard to achieve using the exposed buffers as ring
> >> buffers, and it would shorten the startup of devices with many
> >> features.
> > In the extreme case, what if the guest decides to filter all of the vlans?
> > It means we need MAX_VLAN commands, which may exceed the size of the
> > control virtqueue.
> Indeed, that's a case where a single flat device state blob would be
> more efficient for hardware drivers to apply than individual control
> command messages.
>

If we're going that route, being able to set a bitmap for vlan
filtering (Jason's proposal) seems to solve more issues in one
shot, doesn't it?

Thanks!
Si-Wei Liu Sept. 14, 2022, 3:43 p.m. UTC | #11
On 9/14/2022 2:57 PM, Eugenio Perez Martin wrote:
> On Wed, Sep 14, 2022 at 1:33 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>>
>>
>> On 9/14/2022 3:20 AM, Jason Wang wrote:
>>> On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>>>> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
>>>>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
>>>>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>>>>>>> [...]
>>>>>> This seems to cause a lot of latency if the driver has a lot of vlans.
>>>>>>
>>>>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
>>>>>> the CTRL_VLAN feature at the vDPA layer.
>>>> The guest will not be able to recover that vlan configuration.
>>> The spec says it's best effort, and we actually haven't done this for
>>> vhost-net/user for years and managed to survive.
>> I thought there's a line between best effort and doing nothing. ;-)
>>
> I also think it is worth at least trying to migrate it. For
> MAC there may be too many entries, but VLAN should be totally
> manageable (and the information is already there: the bitmap is
> actually being migrated).
The vlan bitmap is migrated, though you'd still need to add vlan filters
one by one through ctrl vq commands, correct? AFAIK you can add a
filter for one single vlan ID at a time via VIRTIO_NET_CTRL_VLAN_ADD.

>
>> I think the reason this could survive is really software
>> implementation specific - say, if the backend doesn't start with VLAN
>> filtering disabled (meaning allow all VLAN traffic to pass), then it
>> would become a problem. This won't be a problem for most PF NICs, but
>> it may be problematic for vDPA, e.g. VF-backed vDPAs.
> I'd say the driver would expect all vlan filters to be cleared after a
> reset, wouldn't it?
If I understand the intended behavior (from the QEMU implementation)
correctly, when VIRTIO_NET_F_CTRL_VLAN is negotiated the device starts with
all VLANs filtered (meaning only untagged traffic can be received, and
traffic with a VLAN tag gets dropped). However, if
VIRTIO_NET_F_CTRL_VLAN is not negotiated, all traffic, both untagged
and tagged, can be received.
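
That behavior follows from how QEMU seeds the vlan table when features are
set; roughly (paraphrasing hw/net/virtio-net.c from memory, so treat the
exact code as approximate):

if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
    memset(n->vlans, 0, MAX_VLAN >> 3);      /* drop all tagged traffic */
} else {
    memset(n->vlans, 0xff, MAX_VLAN >> 3);   /* let every vlan pass */
}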

>   The spec doesn't explicitly say anything about that
> as far as I see.
Here the spec is totally ruled by the (software artifact of the)
implementation rather than what a real device is expected to do with
VLAN rx filters. Are we sure we want to stick to this flawed device
implementation? The guest driver seems to be agnostic to this broken
spec behavior so far, and I am afraid it's overkill to add another
feature bit or ctrl command to fix the VLAN filter in a clean way.

>
>>>>> Another idea is to extend the spec to allow us to accept a bitmap of
>>>>> the vlan ids via a single command, then we will be fine.
>>>>>
>>>> I'm not sure if adding more ways to configure something is the answer,
>>>> but I'd be ok to implement it.
>>>>
>>>> Another idea is to allow sending many CVQ commands in parallel. It
>>>> shouldn't be very hard to achieve using the exposed buffers as ring
>>>> buffers, and it would shorten the startup of devices with many
>>>> features.
>>> In the extreme case, what if the guest decides to filter all of the vlans?
>>> It means we need MAX_VLAN commands, which may exceed the size of the
>>> control virtqueue.
>> Indeed, that's a case where a single flat device state blob would be
>> more efficient for hardware drivers to apply than individual control
>> command messages.
>>
> If we're going that route, being able to set a bitmap for vlan
> filtering (Jason's proposal) seems to solve more issues in one
> shot, doesn't it?
If I understand Jason's proposal correctly, it's already a spec extension
to add multiple VLAN IDs in a row. This seems not much different
from the device state blob for the resume API idea I just presented at
the KVM Forum (which also needs to extend the spec in another way)?

struct virtio_mig_cfg_net_ctrl_vlan {
     struct virtio_mig_state_header hdr;
     le32 vlans[128];
};
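
(128 * sizeof(le32) = 512 bytes, i.e. the entire 4096-bit filter table fits
in a single message.)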

What is the additional benefit that makes it able to solve more issues?

  -Siwei

Jason Wang Sept. 15, 2022, 2:40 a.m. UTC | #12
On Wed, Sep 14, 2022 at 7:33 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
>
>
> On 9/14/2022 3:20 AM, Jason Wang wrote:
> > On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
> >> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> >>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> >>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >>>>> [...]
> >>>> This seems to cause a lot of latency if the driver has a lot of vlans.
> >>>>
> >>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
> >>>> the CTRL_VLAN feature at the vDPA layer.
> >> The guest will not be able to recover that vlan configuration.
> > The spec says it's best effort, and we actually haven't done this for
> > vhost-net/user for years and managed to survive.
> I thought there's a line between best effort and doing nothing. ;-)
>
> I think the reason this could survive is really software
> implementation specific - say, if the backend doesn't start with VLAN
> filtering disabled (meaning allow all VLAN traffic to pass), then it
> would become a problem. This won't be a problem for most PF NICs, but
> it may be problematic for vDPA, e.g. VF-backed vDPAs.

So it looks like an implementation issue. If CTRL_VLAN is not
negotiated, the device should disable vlan filters.


> >
> >>> Another idea is to extend the spec to allow us to accept a bitmap of
> >>> the vlan ids via a single command, then we will be fine.
> >>>
> >> I'm not sure if adding more ways to configure something is the answer,
> >> but I'd be ok to implement it.
> >>
> >> Another idea is to allow sending many CVQ commands in parallel. It
> >> shouldn't be very hard to achieve using the exposed buffers as ring
> >> buffers, and it would shorten the startup of devices with many
> >> features.
> > In the extreme case, what if guests decide to filter all of the vlans?
> > It means we need MAX_VLAN commands, which may exceed the size of the
> > control virtqueue.
> Indeed, that's a case where a single flat device state blob would be
> more efficient for hardware drivers to apply than individual control
> command messages.

Right, so we can optimize the spec for this.
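
For example, something along these lines (a sketch only; the command name,
number and payload layout are invented and exist nowhere in the spec
today):

/* Hypothetical control command: replace the entire vlan filter table in
 * one shot.  4096 vlan ids fit in 128 le32 words, i.e. a 512-byte
 * payload, instead of up to 4096 VIRTIO_NET_CTRL_VLAN_ADD round trips. */
#define VIRTIO_NET_CTRL_VLAN_SET_MAP  2      /* invented command number */

struct virtio_net_ctrl_vlan_map {
    __le32 map[(1 << 12) / 32];              /* bit n set => accept vlan n */
};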

Thanks

>
> -Siwei
Jason Wang Sept. 15, 2022, 2:45 a.m. UTC | #13
On Wed, Sep 14, 2022 at 11:44 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
>
>
> On 9/14/2022 2:57 PM, Eugenio Perez Martin wrote:
> > On Wed, Sep 14, 2022 at 1:33 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
> >>
> >>
> >> On 9/14/2022 3:20 AM, Jason Wang wrote:
> >>> On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
> >>>> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> >>>>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> >>>>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >>>>>>> [...]
> >>>>>>> +    for (int i = 0; i < MAX_VLAN >> 5; i++) {
> >>>>>>> +        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
> >>>>>>> +            if (n->vlans[i] & (1U << j)) {
> >>>>>>> +                int r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
> >>>>>> This seems to cause a lot of latency if the driver has a lot of vlans.
> >>>>>>
> >>>>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
> >>>>>> the CTRL_VLAN feature at the vDPA layer.
> >>>> The guest will not be able to recover that vlan configuration.
> >>> The spec says it's best effort, and we actually haven't done this for
> >>> vhost-net/user for years and have managed to survive.
> >> I thought there's a borderline between best effort and doing nothing. ;-)
> >>
> > I also think it is worth at least trying to migrate it for sure. For
> > MAC there may be too many entries, but VLAN should be totally
> > manageable (and the information is already there, the bitmap is
> > actually being migrated).
> The vlan bitmap is migrated, though you'd still need to add the vlan
> filters one by one through ctrl vq commands, correct? AFAIK you can add a
> filter for one single vlan ID at a time via VIRTIO_NET_CTRL_VLAN_ADD.
>
> >
> >> I think the reason this could survive is really software-implementation
> >> specific - say, if the backend doesn't start with VLAN filtering disabled
> >> (meaning it allows all VLAN traffic to pass), then it would become a
> >> problem. This won't be a problem for almost all PF NICs but may be
> >> problematic for vDPA, e.g. VF-backed vDPAs.
> > I'd say the driver would expect all vlan filters to be cleared after a
> > reset, wouldn't it?
> If I understand the intended behavior (from the QEMU implementation)
> correctly, when VIRTIO_NET_F_CTRL_VLAN is negotiated it would start with
> all VLANs filtered (meaning only untagged traffic can be received and
> traffic with a VLAN tag would get dropped). However, if
> VIRTIO_NET_F_CTRL_VLAN is not negotiated, all traffic, untagged
> and tagged alike, can be received.
>
> >   The spec doesn't explicitly say anything about that
> > as far as I see.
> Here the spec is totally ruled by the implementation (a software
> artifact) rather than by how a real device is expected to handle
> VLAN rx filters. Are we sure we'd stick to this flawed device
> implementation? The guest driver seems to be agnostic to this broken
> spec behavior so far, and I am afraid it's overkill to add another
> feature bit or ctrl command to fix the VLAN filter in a clean way.
>
> >
> >>>>> Another idea is to extend the spec to allow us to accept a bitmap of
> >>>>> the vlan ids via a single command, then we will be fine.
> >>>>>
> >>>> I'm not sure if adding more ways to configure something is the answer,
> >>>> but I'd be ok to implement it.
> >>>>
> >>>> Another idea is to allow the sending of many CVQ commands in parallel.
> >>>> It shouldn't be very hard to achieve using exposed buffers as ring
> >>>> buffers, and it would shorten the startup of devices with many
> >>>> features.
> >>> In the extreme case, what if guests decide to filter all of the vlans?
> >>> It means we need MAX_VLAN commands, which may exceed the size of the
> >>> control virtqueue.
> >> Indeed, that's a case where a single flat device state blob would be
> >> more efficient for hardware drivers to apply than individual control
> >> command messages.
> >>
> > If we're going that route, being able to set a bitmap for vlan
> > filtering (Jason's proposal) seems to solve more issues in the same
> > shot, doesn't it?
> If I understand Jason's proposal correctly, it's already a spec extension
> to add multiple VLAN IDs in a row. This seems not much different
> from the device state blob for the resume API idea I just presented at
> the KVM forum (which also needs to extend the spec in another way)?
>
> struct virtio_mig_cfg_net_ctrl_vlan {
>      struct virtio_mig_state_header hdr;
>      le32 vlans[128];
> };
>

Some thoughts, actually, it's a choice of

1) trap and emulate

vs

2) load and save

So far vDPA has chosen method 1 (though that doesn't exclude the
possibility of method 2). The main "problem" with method 2 is that it
may not work, or may still require trapping, in the nesting layer.

For example, in the case of ctrl_vlan, if we simply extend the spec
with an additional command, trap and emulate still works like a charm.

For load and save, it looks to me like it would be better to start the
work in the spec (NV is trying to do this). Going with vDPA is also fine
if the spec turns out to be too hard.
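
As a sketch of method 1 with such a command (reusing the invented
VIRTIO_NET_CTRL_VLAN_SET_MAP from earlier in the thread, and ignoring
endianness conversion for brevity; this is not real QEMU code), the whole
vlan restore path would collapse into a single CVQ exchange:

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s, const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;
    ssize_t dev_written;

    if (!(features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) {
        return 0;
    }

    /* n->vlans is already the migrated 4096-bit bitmap (MAX_VLAN / 8
     * bytes), so it can be handed to the device verbatim. */
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
                                          VIRTIO_NET_CTRL_VLAN_SET_MAP,
                                          n->vlans, MAX_VLAN / 8);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK ? -EINVAL : 0;
}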

Thanks

> What is the additional benefit that makes it able to solve more issues?
>
>   -Siwei
>
> >
> > Thanks!
> >
>
Eugenio Perez Martin Sept. 16, 2022, 1:45 p.m. UTC | #14
On Wed, Sep 14, 2022 at 5:44 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
>
>
> On 9/14/2022 2:57 PM, Eugenio Perez Martin wrote:
> > On Wed, Sep 14, 2022 at 1:33 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
> >>
> >>
> >> On 9/14/2022 3:20 AM, Jason Wang wrote:
> >>> On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
> >>>> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
> >>>>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
> >>>>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >>>>>>> [...]
> >>>>>>> +    for (int i = 0; i < MAX_VLAN >> 5; i++) {
> >>>>>>> +        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
> >>>>>>> +            if (n->vlans[i] & (1U << j)) {
> >>>>>>> +                int r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
> >>>>>> This seems to cause a lot of latency if the driver has a lot of vlans.
> >>>>>>
> >>>>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
> >>>>>> the CTRL_VLAN feature at the vDPA layer.
> >>>> The guest will not be able to recover that vlan configuration.
> >>> The spec says it's best effort, and we actually haven't done this for
> >>> vhost-net/user for years and have managed to survive.
> >> I thought there's a borderline between best effort and doing nothing. ;-)
> >>
> > I also think it is worth at least trying to migrate it for sure. For
> > MAC there may be too many entries, but VLAN should be totally
> > manageable (and the information is already there, the bitmap is
> > actually being migrated).
> The vlan bitmap is migrated, though you'd still need to add the vlan
> filters one by one through ctrl vq commands, correct? AFAIK you can add a
> filter for one single vlan ID at a time via VIRTIO_NET_CTRL_VLAN_ADD.
>
> >
> >> I think the reason this could survive is really software-implementation
> >> specific - say, if the backend doesn't start with VLAN filtering disabled
> >> (meaning it allows all VLAN traffic to pass), then it would become a
> >> problem. This won't be a problem for almost all PF NICs but may be
> >> problematic for vDPA, e.g. VF-backed vDPAs.
> > I'd say the driver would expect all vlan filters to be cleared after a
> > reset, wouldn't it?
> If I understand the intended behavior (from the QEMU implementation)
> correctly, when VIRTIO_NET_F_CTRL_VLAN is negotiated it would start with
> all VLANs filtered (meaning only untagged traffic can be received and
> traffic with a VLAN tag would get dropped). However, if
> VIRTIO_NET_F_CTRL_VLAN is not negotiated, all traffic, untagged
> and tagged alike, can be received.
>
> >   The spec doesn't explicitly say anything about that
> > as far as I see.
> Here the spec is totally ruled by the implementation (a software
> artifact) rather than by how a real device is expected to handle
> VLAN rx filters. Are we sure we'd stick to this flawed device
> implementation? The guest driver seems to be agnostic to this broken
> spec behavior so far, and I am afraid it's overkill to add another
> feature bit or ctrl command to fix the VLAN filter in a clean way.
>

I agree with all of the above. So, double checking: all vlans should be
allowed by default at device start? Maybe the spec needs to be clearer
in that regard?

> >
> >>>>> Another idea is to extend the spec to allow us to accept a bitmap of
> >>>>> the vlan ids via a single command, then we will be fine.
> >>>>>
> >>>> I'm not sure if adding more ways to configure something is the answer,
> >>>> but I'd be ok to implement it.
> >>>>
> >>>> Another idea is to allow the sending of many CVQ commands in parallel.
> >>>> It shouldn't be very hard to achieve using exposed buffers as ring
> >>>> buffers, and it would shorten the startup of devices with many
> >>>> features.
> >>> In the extreme case, what if guests decide to filter all of the vlans?
> >>> It means we need MAX_VLAN commands, which may exceed the size of the
> >>> control virtqueue.
> >> Indeed, that's a case where a single flat device state blob would be
> >> more efficient for hardware drivers to apply than individual control
> >> command messages.
> >>
> > If we're going that route, being able to set a bitmap for vlan
> > filtering (Jason's proposal) seems to solve more issues in the same
> > shot, doesn't it?
> If I understand Jason's proposal correctly, it's already a spec extension
> to add multiple VLAN IDs in a row. This seems not much different
> from the device state blob for the resume API idea I just presented at
> the KVM forum (which also needs to extend the spec in another way)?
>
> struct virtio_mig_cfg_net_ctrl_vlan {
>      struct virtio_mig_state_header hdr;
>      le32 vlans[128];
> };
>
> What is the additional benefit that makes it able to solve more issues?
>

Maybe I totally misunderstood you.

I don't think the solution to this problem is to create a call from
the VMM to restore all the device state (vlan, mac, stats, ...),
because that would not allow the guest to configure many vlan filters
in one shot and retain all other config.

If your proposal is like Jason's in that regard, I'm totally ok with
it, and I don't have a strong opinion about the layout. Maybe I would
not name it "mig_state", since it would be used by the guest in
contexts other than migration.

In other words, we should handle this outside of the migration scope,
because the guest may use it to set many vlan filters in one shot,
retaining other configuration. No more, no less.
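
For instance, on the guest side it could look like this (again assuming
the invented VIRTIO_NET_CTRL_VLAN_SET_MAP command; virtnet_send_command()
is the existing CVQ helper in the Linux virtio-net driver, everything else
here is hypothetical):

/* Hypothetical guest helper: program all 4096 vlan filters with a
 * single CVQ command instead of one VIRTIO_NET_CTRL_VLAN_ADD per vid. */
static int virtnet_set_vlan_map(struct virtnet_info *vi,
                                const __le32 *map /* 128 words */)
{
    struct scatterlist sg;

    sg_init_one(&sg, map, (1 << 12) / 8);    /* 512-byte bitmap */
    if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                              VIRTIO_NET_CTRL_VLAN_SET_MAP, &sg))
        return -EINVAL;

    return 0;
}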

Thanks!
Si-Wei Liu Sept. 21, 2022, 11 p.m. UTC | #15
On 9/16/2022 6:45 AM, Eugenio Perez Martin wrote:
> On Wed, Sep 14, 2022 at 5:44 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>>
>>
>> On 9/14/2022 2:57 PM, Eugenio Perez Martin wrote:
>>> On Wed, Sep 14, 2022 at 1:33 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>>>>
>>>> On 9/14/2022 3:20 AM, Jason Wang wrote:
>>>>> On Fri, Sep 9, 2022 at 4:02 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>>>>>> On Fri, Sep 9, 2022 at 8:40 AM Jason Wang <jasowang@redhat.com> wrote:
>>>>>>> On Fri, Sep 9, 2022 at 2:38 PM Jason Wang <jasowang@redhat.com> wrote:
>>>>>>>> On Wed, Sep 7, 2022 at 12:36 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>>>>>>>>> [...]
>>>>>>>>> +    for (int i = 0; i < MAX_VLAN >> 5; i++) {
>>>>>>>>> +        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
>>>>>>>>> +            if (n->vlans[i] & (1U << j)) {
>>>>>>>>> +                int r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
>>>>>>>> This seems to cause a lot of latency if the driver has a lot of vlans.
>>>>>>>>
>>>>>>>> I wonder if it's simpler to just let all vlan traffic pass by disabling
>>>>>>>> the CTRL_VLAN feature at the vDPA layer.
>>>>>> The guest will not be able to recover that vlan configuration.
>>>>> The spec says it's best effort, and we actually haven't done this for
>>>>> vhost-net/user for years and have managed to survive.
>>>> I thought there's a borderline between best effort and doing nothing. ;-)
>>>>
>>> I also think it is worth at least trying to migrate it for sure. For
>>> MAC there may be too many entries, but VLAN should be totally
>>> manageable (and the information is already there, the bitmap is
>>> actually being migrated).
>> The vlan bitmap is migrated, though you'd still need to add the vlan
>> filters one by one through ctrl vq commands, correct? AFAIK you can add a
>> filter for one single vlan ID at a time via VIRTIO_NET_CTRL_VLAN_ADD.
>>
>>>> I think the reason this could survive is really software-implementation
>>>> specific - say, if the backend doesn't start with VLAN filtering disabled
>>>> (meaning it allows all VLAN traffic to pass), then it would become a
>>>> problem. This won't be a problem for almost all PF NICs but may be
>>>> problematic for vDPA, e.g. VF-backed vDPAs.
>>> I'd say the driver would expect all vlan filters to be cleared after a
>>> reset, wouldn't it?
>> If I understand the intended behavior (from the QEMU implementation)
>> correctly, when VIRTIO_NET_F_CTRL_VLAN is negotiated it would start with
>> all VLANs filtered (meaning only untagged traffic can be received and
>> traffic with a VLAN tag would get dropped). However, if
>> VIRTIO_NET_F_CTRL_VLAN is not negotiated, all traffic, untagged
>> and tagged alike, can be received.
>>
>>>    The spec doesn't explicitly say anything about that
>>> as far as I see.
>> Here the spec is totally ruled by the implementation (a software
>> artifact) rather than by how a real device is expected to handle
>> VLAN rx filters. Are we sure we'd stick to this flawed device
>> implementation? The guest driver seems to be agnostic to this broken
>> spec behavior so far, and I am afraid it's overkill to add another
>> feature bit or ctrl command to fix the VLAN filter in a clean way.
>>
> I agree with all of the above. So, double checking: all vlans should be
> allowed by default at device start?
That is true only when VIRTIO_NET_F_CTRL_VLAN is not negotiated. If the
guest already negotiated VIRTIO_NET_F_CTRL_VLAN before being migrated,
the device should resume with all VLANs filtered/disallowed.

>   Maybe the spec needs to be clearer
> in that regard?
Yes, I think this is crucial. Otherwise we can't get consistent
behavior, either from software to vDPA or across various vDPA vendors.
>
>>>>>>> Another idea is to extend the spec to allow us to accept a bitmap of
>>>>>>> the vlan ids via a single command, then we will be fine.
>>>>>>>
>>>>>> I'm not sure if adding more ways to configure something is the answer,
>>>>>> but I'd be ok to implement it.
>>>>>>
>>>>>> Another idea is to allow the sending of many CVQ commands in parallel.
>>>>>> It shouldn't be very hard to achieve using exposed buffers as ring
>>>>>> buffers, and it would shorten the startup of devices with many
>>>>>> features.
>>>>> In the extreme case, what if guests decide to filter all of the vlans?
>>>>> It means we need MAX_VLAN commands, which may exceed the size of the
>>>>> control virtqueue.
>>>> Indeed, that's a case where a single flat device state blob would be
>>>> more efficient for hardware drivers to apply than individual control
>>>> command messages.
>>>>
>>> If we're going that route, being able to set a bitmap for vlan
>>> filtering (Jason's proposal) seems to solve more issues in the same
>>> shot, doesn't it?
>> If I understand Jason's proposal correctly, it's already a spec extension
>> to add multiple VLAN IDs in a row. This seems not much different
>> from the device state blob for the resume API idea I just presented at
>> the KVM forum (which also needs to extend the spec in another way)?
>>
>> struct virtio_mig_cfg_net_ctrl_vlan {
>>       struct virtio_mig_state_header hdr;
>>       le32 vlans[128];
>> };
>>
>> What is the additional benefit that makes it able to solve more issues?
>>
> Maybe I totally misunderstood you.
>
> I don't think the solution to this problem is to create a call from
> the VMM to restore all the device state (vlan, mac, stats, ...),
> because that would not allow the guest to configure many vlan filters
> in one shot and retain all other config.
Noted, I was simply asking a question. I am trying to understand whether
there exists a use case *outside of migration* where the guest would
require programming multiple vlan filters in a row rather than doing
multiple VIRTIO_NET_CTRL_VLAN_ADD calls, adding one vlan filter each. If
there's indeed such a use case, then it's a nice addition beyond the
migration case, I agree.

Thanks,
-Siwei

>
> If your proposal is like Jason's in that regard, I'm totally ok with
> it, and I don't have a strong opinion about the layout. Maybe I would
> not name it "mig_state", since it would be used by the guest in
> contexts other than migration.
>
> In other words, we should handle this outside of the migration scope,
> because the guest may use it to set many vlan filters in one shot,
> retaining other configuration. No more, no less.
>
> Thanks!
>
Michael S. Tsirkin Sept. 29, 2022, 7:13 a.m. UTC | #16
On Wed, Sep 21, 2022 at 04:00:58PM -0700, Si-Wei Liu wrote:
> > > >    The spec doesn't explicitly say anything about that
> > > > as far as I see.
> > > Here the spec is totally ruled by the implementation (a software
> > > artifact) rather than by how a real device is expected to handle
> > > VLAN rx filters. Are we sure we'd stick to this flawed device
> > > implementation? The guest driver seems to be agnostic to this broken
> > > spec behavior so far, and I am afraid it's overkill to add another
> > > feature bit or ctrl command to fix the VLAN filter in a clean way.
> > > 
> > I agree with all of the above. So, double checking: all vlans should be
> > allowed by default at device start?
> That is true only when VIRTIO_NET_F_CTRL_VLAN is not negotiated. If the
> guest already negotiated VIRTIO_NET_F_CTRL_VLAN before being migrated,
> the device should resume with all VLANs filtered/disallowed.
> 
> >   Maybe the spec needs to be clearer
> > in that regard?
> Yes, I think this is crucial. Otherwise we can't get consistent
> behavior, either from software to vDPA or across various vDPA vendors.

OK. Can you open a github issue for the spec? We'll try to address it.
Also, is it ok if we make it a SHOULD, i.e. best-effort filtering?
Si-Wei Liu Oct. 4, 2022, 10:33 p.m. UTC | #17
On 9/29/2022 12:13 AM, Michael S. Tsirkin wrote:
> On Wed, Sep 21, 2022 at 04:00:58PM -0700, Si-Wei Liu wrote:
>>>>>     The spec doesn't explicitly say anything about that
>>>>> as far as I see.
>>>> Here the spec is totally ruled by the implementation (a software
>>>> artifact) rather than by how a real device is expected to handle
>>>> VLAN rx filters. Are we sure we'd stick to this flawed device
>>>> implementation? The guest driver seems to be agnostic to this broken
>>>> spec behavior so far, and I am afraid it's overkill to add another
>>>> feature bit or ctrl command to fix the VLAN filter in a clean way.
>>>>
>>> I agree with all of the above. So, double checking: all vlans should be
>>> allowed by default at device start?
>> That is true only when VIRTIO_NET_F_CTRL_VLAN is not negotiated. If the
>> guest already negotiated VIRTIO_NET_F_CTRL_VLAN before being migrated,
>> the device should resume with all VLANs filtered/disallowed.
>>
>>>    Maybe the spec needs to be clearer
>>> in that regard?
>> Yes, I think this is crucial. Otherwise we can't get consistent
>> behavior, either from software to vDPA or across various vDPA vendors.
> OK. Can you open a github issue for the spec? We'll try to address it.
Thanks, ticket filed at:
https://github.com/oasis-tcs/virtio-spec/issues/147
> Also, is it ok if we make it a SHOULD, i.e. best-effort filtering?
>
Yes, that's fine.

-Siwei
diff mbox series

Patch

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 4bc3fd01a8..ecbfd08eb9 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -100,6 +100,8 @@  static const uint64_t vdpa_svq_device_features =
     BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
     BIT_ULL(VIRTIO_NET_F_STANDBY);
 
+#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
+
 VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -423,6 +425,47 @@  static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
     return *s->status != VIRTIO_NET_OK;
 }
 
+static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
+                                           const VirtIONet *n,
+                                           uint16_t vid)
+{
+    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
+                                                  VIRTIO_NET_CTRL_VLAN_ADD,
+                                                  &vid, sizeof(vid));
+    if (unlikely(dev_written < 0)) {
+        return dev_written;
+    }
+
+    if (unlikely(*s->status != VIRTIO_NET_OK)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
+                                    const VirtIONet *n)
+{
+    uint64_t features = n->parent_obj.guest_features;
+
+    if (!(features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) {
+        return 0;
+    }
+
+    for (int i = 0; i < MAX_VLAN >> 5; i++) {
+        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
+            if (n->vlans[i] & (1U << j)) {
+                int r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
+                if (unlikely(r != 0)) {
+                    return r;
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
 static int vhost_vdpa_net_load(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -445,8 +488,7 @@  static int vhost_vdpa_net_load(NetClientState *nc)
     if (unlikely(r)) {
         return r;
     }
-
-    return 0;
+    return vhost_vdpa_net_load_vlan(s, n);
 }
 
 static NetClientInfo net_vhost_vdpa_cvq_info = {