--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -34,6 +34,7 @@ typedef struct VhostVDPAHostNotifier {
typedef struct vhost_vdpa_shared {
int device_fd;
struct vhost_vdpa_iova_range iova_range;
+ QLIST_HEAD(, vdpa_iommu) iommu_list;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
@@ -62,7 +63,6 @@ typedef struct vhost_vdpa {
struct vhost_dev *dev;
Error *migration_blocker;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
- QLIST_HEAD(, vdpa_iommu) iommu_list;
IOMMUNotifier n;
} VhostVDPA;
@@ -279,7 +279,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
return;
}
- QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
+ QLIST_INSERT_HEAD(&v->shared->iommu_list, iommu, iommu_next);
memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
return;
@@ -292,7 +292,7 @@ static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
struct vdpa_iommu *iommu;
- QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
+ QLIST_FOREACH(iommu, &v->shared->iommu_list, iommu_next)
{
if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
iommu->n.start == section->offset_within_region) {
Next patches will register the vhost_vdpa memory listener while the VM is
migrating at the destination, so we can map the memory to the device before
stopping the VM at the source. The main goal is to reduce the downtime.

However, the destination QEMU is unaware of which vhost_vdpa device will
register its memory_listener. If the source guest has CVQ enabled, it will
be the CVQ device. Otherwise, it will be the first one.

Move the iommu_list member to VhostVDPAShared so every vhost_vdpa instance
can use it, rather than it always living in the first / last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost-vdpa.h | 2 +-
 hw/virtio/vhost-vdpa.c         | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
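
For readers unfamiliar with the pattern, below is a minimal standalone
sketch of what the move buys: the list head lives once in the shared
struct, and every per-queue-pair vhost_vdpa reaches the same list through
its shared pointer, so it no longer matters which device's memory listener
populates it. The *_stub types and the region_add() helper are illustrative
assumptions, and the BSD <sys/queue.h> LIST_* macros stand in for QEMU's
QLIST_* wrappers; this is not the real QEMU code.

/*
 * Standalone sketch (not QEMU code): the iommu list head lives once in
 * the shared struct, and each per-queue-pair vhost_vdpa reaches it
 * through its ->shared pointer, so whichever device registers the
 * memory listener inserts into the same list the others see.
 *
 * The *_stub types and region_add() are illustrative assumptions; the
 * BSD <sys/queue.h> LIST_* macros stand in for QEMU's QLIST_* wrappers.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct vdpa_iommu_stub {
    int region_id;                              /* placeholder payload */
    LIST_ENTRY(vdpa_iommu_stub) iommu_next;
};

/* State shared by all vhost_vdpa instances backed by one device fd. */
struct vhost_vdpa_shared_stub {
    LIST_HEAD(, vdpa_iommu_stub) iommu_list;    /* was per-vhost_vdpa */
};

/* Per-queue-pair state: just a pointer to the shared part. */
struct vhost_vdpa_stub {
    struct vhost_vdpa_shared_stub *shared;
};

/* Mimics vhost_vdpa_iommu_region_add() inserting into the shared list. */
static void region_add(struct vhost_vdpa_stub *v, int region_id)
{
    struct vdpa_iommu_stub *iommu = calloc(1, sizeof(*iommu));
    if (!iommu) {
        abort();                                /* error handling elided */
    }
    iommu->region_id = region_id;
    LIST_INSERT_HEAD(&v->shared->iommu_list, iommu, iommu_next);
}

int main(void)
{
    struct vhost_vdpa_shared_stub shared;
    struct vhost_vdpa_stub data_vq = { .shared = &shared };
    struct vhost_vdpa_stub cvq = { .shared = &shared };
    struct vdpa_iommu_stub *it;

    LIST_INIT(&shared.iommu_list);

    region_add(&data_vq, 1);    /* listener owned by the data device... */
    region_add(&cvq, 2);        /* ...or by CVQ: both hit the same list */

    LIST_FOREACH(it, &shared.iommu_list, iommu_next) {
        printf("iommu region %d\n", it->region_id);
    }
    return 0;
}

With the head in VhostVDPAShared, the deletion path in
vhost_vdpa_iommu_region_del() likewise finds entries regardless of which
vhost_vdpa instance inserted them.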