Message ID | 1603449643-12851-15-git-send-email-kwankhede@nvidia.com
State      | New, archived
Series     | Add migration support for VFIO devices
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>

On Fri, Oct 23, 2020 at 04:10:40PM +0530, Kirti Wankhede wrote:
> When vIOMMU is enabled, add a MAP notifier from log_sync when all
> devices in the container are in the stop-and-copy phase of migration.
> Call replay, and then get dirty pages from the notifier callback.
>
> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
> ---
>  hw/vfio/common.c     | 88 ++++++++++++++++++++++++++++++++++++++++++++++++----
>  hw/vfio/trace-events |  1 +
>  2 files changed, 83 insertions(+), 6 deletions(-)
>
> [...]
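The notifier-and-replay flow the commit message describes can be hard to picture from the diff alone: a temporary MAP notifier is registered, replay walks every mapping currently live in the vIOMMU, and the callback queries dirty state for each mapped range. Below is a minimal, self-contained C sketch of that pattern; all names (Mapping, Notifier, replay, mark_dirty) are illustrative stand-ins, not QEMU's actual API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* A live vIOMMU mapping: guest IOVA -> backing RAM, as replay reports it. */
typedef struct Mapping {
    uint64_t iova;
    uint64_t size;
    uint64_t ram_addr;
} Mapping;

typedef struct Notifier Notifier;
struct Notifier {
    void (*notify)(Notifier *n, const Mapping *m);
};

/* Stand-in for the per-range dirty-bitmap query done in the real callback. */
static void mark_dirty(const Mapping *m)
{
    printf("dirty sync: iova=0x%" PRIx64 " size=0x%" PRIx64
           " ram_addr=0x%" PRIx64 "\n", m->iova, m->size, m->ram_addr);
}

/* Notifier callback: invoked once per live mapping during replay. */
static void dirty_notify(Notifier *n, const Mapping *m)
{
    (void)n;
    mark_dirty(m);
}

/* Replay: walk every mapping currently live and fire the notifier. */
static void replay(const Mapping *maps, size_t nmaps, Notifier *n)
{
    for (size_t i = 0; i < nmaps; i++) {
        n->notify(n, &maps[i]);
    }
}

int main(void)
{
    /* Two mappings standing in for whatever the guest has mapped. */
    Mapping live[] = {
        { 0x1000,  0x1000, 0x40000 },
        { 0x8000, 0x10000, 0x50000 },
    };
    Notifier n = { .notify = dirty_notify };

    /* At log_sync time, dirty state is harvested once per live mapping. */
    replay(live, sizeof(live) / sizeof(live[0]), &n);
    return 0;
}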
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 2634387df948..c0b5b6245a47 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -442,8 +442,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
 }
 
 /* Called with rcu_read_lock held. */
-static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
-                           bool *read_only)
+static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+                               ram_addr_t *ram_addr, bool *read_only)
 {
     MemoryRegion *mr;
     hwaddr xlat;
@@ -474,8 +474,17 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
         return false;
     }
 
-    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
-    *read_only = !writable || mr->readonly;
+    if (vaddr) {
+        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
+    }
+
+    if (ram_addr) {
+        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
+    }
+
+    if (read_only) {
+        *read_only = !writable || mr->readonly;
+    }
 
     return true;
 }
@@ -485,7 +494,6 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
     VFIOContainer *container = giommu->container;
     hwaddr iova = iotlb->iova + giommu->iommu_offset;
-    bool read_only;
     void *vaddr;
     int ret;
 
@@ -501,7 +509,9 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     rcu_read_lock();
 
     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
-        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
+        bool read_only;
+
+        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
             goto out;
         }
         /*
@@ -899,11 +909,77 @@ err_out:
     return ret;
 }
 
+typedef struct {
+    IOMMUNotifier n;
+    VFIOGuestIOMMU *giommu;
+} vfio_giommu_dirty_notifier;
+
+static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+    vfio_giommu_dirty_notifier *gdn = container_of(n,
+                                                vfio_giommu_dirty_notifier, n);
+    VFIOGuestIOMMU *giommu = gdn->giommu;
+    VFIOContainer *container = giommu->container;
+    hwaddr iova = iotlb->iova + giommu->iommu_offset;
+    ram_addr_t translated_addr;
+
+    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
+
+    if (iotlb->target_as != &address_space_memory) {
+        error_report("Wrong target AS \"%s\", only system memory is allowed",
+                     iotlb->target_as->name ? iotlb->target_as->name : "none");
+        return;
+    }
+
+    rcu_read_lock();
+    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
+        int ret;
+
+        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
+                                    translated_addr);
+        if (ret) {
+            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
+                         "0x%"HWADDR_PRIx") = %d (%m)",
+                         container, iova,
+                         iotlb->addr_mask + 1, ret);
+        }
+    }
+    rcu_read_unlock();
+}
+
 static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                   MemoryRegionSection *section)
 {
     ram_addr_t ram_addr;
 
+    if (memory_region_is_iommu(section->mr)) {
+        VFIOGuestIOMMU *giommu;
+
+        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
+            if (MEMORY_REGION(giommu->iommu) == section->mr &&
+                giommu->n.start == section->offset_within_region) {
+                Int128 llend;
+                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
+                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
+                                                       MEMTXATTRS_UNSPECIFIED);
+
+                llend = int128_add(int128_make64(section->offset_within_region),
+                                   section->size);
+                llend = int128_sub(llend, int128_one());
+
+                iommu_notifier_init(&gdn.n,
+                                    vfio_iommu_map_dirty_notify,
+                                    IOMMU_NOTIFIER_MAP,
+                                    section->offset_within_region,
+                                    int128_get64(llend),
+                                    idx);
+                memory_region_iommu_replay(giommu->iommu, &gdn.n);
+                break;
+            }
+        }
+        return 0;
+    }
+
     ram_addr = memory_region_get_ram_addr(section->mr) +
                section->offset_within_region;
 
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index dd991bd8f265..c0e75f24b76d 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -164,3 +164,4 @@ vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
 vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
 vfio_load_cleanup(const char *name) " (%s)"
 vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
+vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
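One design choice worth noting in the first hunk: vfio_get_vaddr() becomes vfio_get_xlat_addr() with every out-parameter optional, so each caller requests only the outputs it needs; the map path asks for vaddr and read_only, while the new dirty-sync path passes NULL for both and asks only for ram_addr. A self-contained sketch of that idiom follows; the names and signature are illustrative, not QEMU's actual ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Optional out-parameters: callers pass NULL for results they don't need,
 * and the function fills only what was requested.
 */
static bool get_xlat_addr(uint64_t xlat, uint8_t *ram_base, uint64_t ram_start,
                          bool writable, void **vaddr, uint64_t *ram_addr,
                          bool *read_only)
{
    if (vaddr) {
        *vaddr = ram_base + xlat;      /* host virtual address */
    }
    if (ram_addr) {
        *ram_addr = ram_start + xlat;  /* ram_addr_t-style offset */
    }
    if (read_only) {
        *read_only = !writable;
    }
    return true;
}

int main(void)
{
    static uint8_t fake_ram[0x2000];
    uint64_t ram_addr;

    /* Dirty-sync style caller: only the RAM address is needed. */
    if (get_xlat_addr(0x1000, fake_ram, 0x40000, true,
                      NULL, &ram_addr, NULL)) {
        printf("translated ram_addr=0x%llx\n", (unsigned long long)ram_addr);
    }
    return 0;
}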