Message ID | 20221108170755.92768-10-eperezma@redhat.com (mailing list archive)
State      | New, archived
Series     | ASID support in vhost-vdpa net
On Wed, Nov 9, 2022 at 1:08 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> The memory listener that tells the device how to convert GPA to qemu's
> va is registered against CVQ vhost_vdpa. This series tries to map the
> memory listener translations to ASID 0, while it maps the CVQ ones to
> ASID 1.
>
> Let's tell the listener if it needs to register them on the iova tree or
> not.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> [...]
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 85a318faca..02780ee37b 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -570,6 +570,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      s->vhost_vdpa.index = queue_pair_index;
>      s->always_svq = svq;
>      s->vhost_vdpa.shadow_vqs_enabled = svq;
> +    s->vhost_vdpa.listener_shadow_vq = svq;

Any chance those above two can differ?

Thanks

>      s->vhost_vdpa.iova_tree = iova_tree;
>      if (!is_datapath) {
>          s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
On Thu, Nov 10, 2022 at 7:01 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Wed, Nov 9, 2022 at 1:08 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > [...]
> > @@ -570,6 +570,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >      s->vhost_vdpa.index = queue_pair_index;
> >      s->always_svq = svq;
> >      s->vhost_vdpa.shadow_vqs_enabled = svq;
> > +    s->vhost_vdpa.listener_shadow_vq = svq;
>
> Any chance those above two can differ?

If CVQ is shadowed but data VQs are not, shadow_vqs_enabled is true
but listener_shadow_vq is not.

It is more clear in the next commit, where only shadow_vqs_enabled is
set to true at vhost_vdpa_net_cvq_start.

Thanks!
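To make the asymmetric case above concrete, here is a minimal C sketch.
The struct below is a stand-in holding only the two flags under
discussion, not QEMU's struct vhost_vdpa, and the helper name is
invented for illustration; only the flag semantics and the role of
vhost_vdpa_net_cvq_start come from the thread.

#include <stdbool.h>

/* Stand-in with just the two flags under discussion (illustrative only). */
struct vdpa_shadow_flags {
    bool shadow_vqs_enabled;   /* this vhost_vdpa's own vqs go through SVQ */
    bool listener_shadow_vq;   /* listener sends iova-tree addresses, not GPA */
};

/*
 * Sketch of the CVQ-only shadowing case: the control virtqueue is
 * shadowed, but guest memory for the passthrough data vqs must still be
 * mapped by GPA, so the listener flag stays false.  In the series this
 * split happens when vhost_vdpa_net_cvq_start() sets only
 * shadow_vqs_enabled in the next commit.
 */
void example_shadow_cvq_only(struct vdpa_shadow_flags *cvq)
{
    cvq->shadow_vqs_enabled = true;   /* shadow the CVQ itself */
    cvq->listener_shadow_vq = false;  /* keep GPA mappings for guest memory */
}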
On 2022/11/10 21:47, Eugenio Perez Martin wrote:
> On Thu, Nov 10, 2022 at 7:01 AM Jason Wang <jasowang@redhat.com> wrote:
>> On Wed, Nov 9, 2022 at 1:08 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>>> [...]
>>> @@ -34,6 +34,8 @@ typedef struct vhost_vdpa {
>>>      struct vhost_vdpa_iova_range iova_range;
>>>      uint64_t acked_features;
>>>      bool shadow_vqs_enabled;
>>> +    /* The listener must send iova tree addresses, not GPA */

Btw, cindy's vIOMMU series will make it not necessarily GPA any more.

>>> +    bool listener_shadow_vq;
>>> [...]
>>> @@ -570,6 +570,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>>>      s->vhost_vdpa.index = queue_pair_index;
>>>      s->always_svq = svq;
>>>      s->vhost_vdpa.shadow_vqs_enabled = svq;
>>> +    s->vhost_vdpa.listener_shadow_vq = svq;
>> Any chance those above two can differ?
>>
> If CVQ is shadowed but data VQs are not, shadow_vqs_enabled is true
> but listener_shadow_vq is not.
>
> It is more clear in the next commit, where only shadow_vqs_enabled is
> set to true at vhost_vdpa_net_cvq_start.

Ok, the name looks a little bit confusing. I wonder if it's better to
use shadow_cvq and shadow_data?

Thanks
On Fri, Nov 11, 2022 at 8:49 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On 2022/11/10 21:47, Eugenio Perez Martin wrote:
> > > [...]
> > > +    /* The listener must send iova tree addresses, not GPA */
>
> Btw, cindy's vIOMMU series will make it not necessarily GPA any more.

Yes, this comment should be tuned then. But the SVQ iova_tree will not
be equal to the vIOMMU one because of the shadow vrings.

But maybe SVQ can inspect both instead of having all the duplicated
entries.

> > It is more clear in the next commit, where only shadow_vqs_enabled is
> > set to true at vhost_vdpa_net_cvq_start.
>
> Ok, the name looks a little bit confusing. I wonder if it's better to
> use shadow_cvq and shadow_data?

I'm ok with renaming it, but struct vhost_vdpa is generic across all
kinds of devices, and it does not know if it is a datapath or not for
the moment.

Maybe listener_uses_iova_tree?

Thanks!
On 2022/11/11 21:12, Eugenio Perez Martin wrote:
> On Fri, Nov 11, 2022 at 8:49 AM Jason Wang <jasowang@redhat.com> wrote:
>> [...]
>> Ok, the name looks a little bit confusing. I wonder if it's better to
>> use shadow_cvq and shadow_data?
>>
> I'm ok with renaming it, but struct vhost_vdpa is generic across all
> kinds of devices, and it does not know if it is a datapath or not for
> the moment.
>
> Maybe listener_uses_iova_tree?

I think "iova_tree" is something that is internal to the svq
implementation; it's better to define the name from the view of the
vhost_vdpa level.

Thanks
On Mon, Nov 14, 2022 at 5:30 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On 2022/11/11 21:12, Eugenio Perez Martin wrote:
> > [...]
> > Maybe listener_uses_iova_tree?
>
> I think "iova_tree" is something that is internal to the svq
> implementation; it's better to define the name from the view of the
> vhost_vdpa level.

I don't get this, the vhost_vdpa struct already has a pointer to its
iova_tree.

Thanks!
On Tue, Nov 15, 2022 at 12:31 AM Eugenio Perez Martin
<eperezma@redhat.com> wrote:
>
> On Mon, Nov 14, 2022 at 5:30 AM Jason Wang <jasowang@redhat.com> wrote:
> > [...]
> > I think "iova_tree" is something that is internal to the svq
> > implementation; it's better to define the name from the view of the
> > vhost_vdpa level.
>
> I don't get this, the vhost_vdpa struct already has a pointer to its
> iova_tree.

Yes, this is a suggestion to improve the readability of the code. What
I meant is to have a name that demonstrates why we need to use the
iova_tree, rather than just "uses_iova_tree".

Thanks
On Tue, Nov 15, 2022 at 4:04 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Tue, Nov 15, 2022 at 12:31 AM Eugenio Perez Martin
> <eperezma@redhat.com> wrote:
> > [...]
> > I don't get this, the vhost_vdpa struct already has a pointer to its
> > iova_tree.
>
> Yes, this is a suggestion to improve the readability of the code. What
> I meant is to have a name that demonstrates why we need to use the
> iova_tree, rather than just "uses_iova_tree".

I understand.

Knowing that the listener will always be bound to the data vqs (be it
net, blk, ...), I think it is ok to rename it to shadow_data.

But I think there is no way to add shadow_cvq properly from
hw/virtio/vhost-vdpa.c, since we don't know if the vhost_vdpa belongs
to a datapath or not. Would it work just to rename listener_shadow_vq
to shadow_data?

Thanks!
On Tue, Nov 15, 2022 at 7:25 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>
> [...]
> Knowing that the listener will always be bound to the data vqs (be it
> net, blk, ...), I think it is ok to rename it to shadow_data.
>
> But I think there is no way to add shadow_cvq properly from
> hw/virtio/vhost-vdpa.c, since we don't know if the vhost_vdpa belongs
> to a datapath or not. Would it work just to rename listener_shadow_vq
> to shadow_data?

This should work.

Thanks
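For reference, a sketch of what the renamed field could look like after
the change agreed on above. This only illustrates the outcome of the
discussion, it is not quoted from a later revision of the series, and
the surrounding fields are abbreviated stand-ins.

#include <stdbool.h>

/* Abbreviated stand-in for struct vhost_vdpa after the proposed rename. */
typedef struct example_vhost_vdpa {
    bool shadow_vqs_enabled;  /* this vhost_vdpa's own vqs are shadowed (e.g. CVQ) */
    /* The listener maps the data vqs through the IOVA tree, not by GPA */
    bool shadow_data;         /* was: listener_shadow_vq */
} ExampleVhostVdpa;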
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 6560bb9d78..0c3ed2d69b 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -34,6 +34,8 @@ typedef struct vhost_vdpa {
     struct vhost_vdpa_iova_range iova_range;
     uint64_t acked_features;
     bool shadow_vqs_enabled;
+    /* The listener must send iova tree addresses, not GPA */
+    bool listener_shadow_vq;
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
     GPtrArray *shadow_vqs;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 8fd32ba32b..e3914fa40e 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -220,7 +220,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                          vaddr, section->readonly);
 
     llsize = int128_sub(llend, int128_make64(iova));
-    if (v->shadow_vqs_enabled) {
+    if (v->listener_shadow_vq) {
         int r;
 
         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
@@ -247,7 +247,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
     return;
 
 fail_map:
-    if (v->shadow_vqs_enabled) {
+    if (v->listener_shadow_vq) {
         vhost_iova_tree_remove(v->iova_tree, mem_region);
     }
 
@@ -292,7 +292,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
 
     llsize = int128_sub(llend, int128_make64(iova));
 
-    if (v->shadow_vqs_enabled) {
+    if (v->listener_shadow_vq) {
         const DMAMap *result;
         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
             section->offset_within_region +
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 85a318faca..02780ee37b 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -570,6 +570,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->vhost_vdpa.index = queue_pair_index;
     s->always_svq = svq;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
+    s->vhost_vdpa.listener_shadow_vq = svq;
     s->vhost_vdpa.iova_tree = iova_tree;
     if (!is_datapath) {
         s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
The memory listener that tells the device how to convert GPA to qemu's
va is registered against CVQ vhost_vdpa. This series tries to map the
memory listener translations to ASID 0, while it maps the CVQ ones to
ASID 1.

Let's tell the listener if it needs to register them on the iova tree or
not.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v5: Solve conflict about vhost_iova_tree_remove accepting mem_region by
    value.
---
 include/hw/virtio/vhost-vdpa.h | 2 ++
 hw/virtio/vhost-vdpa.c         | 6 +++---
 net/vhost-vdpa.c               | 1 +
 3 files changed, 6 insertions(+), 3 deletions(-)
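As a rough, self-contained illustration of the choice the commit message
describes: when the new flag is set, the address programmed into the
device comes from the IOVA tree instead of being the guest physical
address itself. The types, names and toy allocator below are invented
for this sketch and are not the QEMU listener code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;

struct toy_vdpa {
    bool listener_shadow_vq;  /* send iova-tree addresses, not GPA */
    hwaddr next_free_iova;    /* toy allocator standing in for the IOVA tree */
};

/* Stand-in for the DMA map call: just report what would be programmed. */
static void toy_dma_map(hwaddr device_addr, hwaddr size)
{
    printf("map device address 0x%llx, size 0x%llx\n",
           (unsigned long long)device_addr, (unsigned long long)size);
}

/* Toy version of the region_add decision this patch switches on the flag. */
static void toy_region_add(struct toy_vdpa *v, hwaddr gpa, hwaddr size)
{
    hwaddr device_addr = gpa;              /* default: device uses GPA directly */

    if (v->listener_shadow_vq) {
        device_addr = v->next_free_iova;   /* SVQ: take an iova-tree address */
        v->next_free_iova += size;
    }
    toy_dma_map(device_addr, size);
}

int main(void)
{
    struct toy_vdpa passthrough = { .listener_shadow_vq = false };
    struct toy_vdpa shadowed = { .listener_shadow_vq = true,
                                 .next_free_iova = 0x1000 };

    toy_region_add(&passthrough, 0x40000000, 0x1000);  /* device sees the GPA */
    toy_region_add(&shadowed, 0x40000000, 0x1000);     /* device sees 0x1000 */
    return 0;
}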