Message ID: 1573578324-8389-14-git-send-email-kwankhede@nvidia.com
State: New, archived
Series: Add migration support for VFIO devices
On Wed, Nov 13, 2019 at 01:05:22AM +0800, Kirti Wankhede wrote:
> vfio_listener_log_sync gets list of dirty pages from container using
> VFIO_IOMMU_GET_DIRTY_BITMAP ioctl and mark those pages dirty when all
> devices are stopped and saving state.
> Return early for the RAM block section of mapped MMIO region.
>
> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
> Reviewed-by: Neo Jia <cjia@nvidia.com>
> ---
>  hw/vfio/common.c     | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  hw/vfio/trace-events |   1 +
>  2 files changed, 104 insertions(+)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index ade9839c28a3..66f1c64bf074 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -29,6 +29,7 @@
>  #include "hw/vfio/vfio.h"
>  #include "exec/address-spaces.h"
>  #include "exec/memory.h"
> +#include "exec/ram_addr.h"
>  #include "hw/hw.h"
>  #include "qemu/error-report.h"
>  #include "qemu/main-loop.h"
> @@ -38,6 +39,7 @@
>  #include "sysemu/reset.h"
>  #include "trace.h"
>  #include "qapi/error.h"
> +#include "migration/migration.h"
>
>  VFIOGroupList vfio_group_list =
>      QLIST_HEAD_INITIALIZER(vfio_group_list);
> @@ -288,6 +290,28 @@ const MemoryRegionOps vfio_region_ops = {
>  };
>
>  /*
> + * Device state interfaces
> + */
> +
> +static bool vfio_devices_are_stopped_and_saving(void)
> +{
> +    VFIOGroup *group;
> +    VFIODevice *vbasedev;
> +
> +    QLIST_FOREACH(group, &vfio_group_list, next) {
> +        QLIST_FOREACH(vbasedev, &group->device_list, next) {
> +            if ((vbasedev->device_state & VFIO_DEVICE_STATE_SAVING) &&
> +                !(vbasedev->device_state & VFIO_DEVICE_STATE_RUNNING)) {
> +                continue;
> +            } else {
> +                return false;
> +            }
> +        }
> +    }
> +    return true;
> +}
> +
> +/*
>   * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
>   */
>  static int vfio_dma_unmap(VFIOContainer *container,
> @@ -813,9 +837,88 @@ static void vfio_listener_region_del(MemoryListener *listener,
>      }
>  }
>
> +static int vfio_get_dirty_bitmap(VFIOContainer *container,
> +                                 MemoryRegionSection *section)
> +{
> +    struct vfio_iommu_type1_dirty_bitmap range;
> +    uint64_t bitmap_size;
> +    int ret;
> +
> +    range.argsz = sizeof(range);
> +
> +    if (memory_region_is_iommu(section->mr)) {
> +        VFIOGuestIOMMU *giommu;
> +        IOMMUTLBEntry iotlb;
> +
> +        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
> +            if (MEMORY_REGION(giommu->iommu) == section->mr &&
> +                giommu->n.start == section->offset_within_region) {
> +                break;
> +            }
> +        }
> +
> +        if (!giommu) {
> +            return -EINVAL;
> +        }
> +
> +        iotlb = address_space_get_iotlb_entry(container->space->as,
> +                    TARGET_PAGE_ALIGN(section->offset_within_address_space),
> +                    true, MEMTXATTRS_UNSPECIFIED);
> +        range.iova = iotlb.iova + giommu->iommu_offset;
> +        range.size = iotlb.addr_mask + 1;
> +    } else {
> +        range.iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
> +        range.size = int128_get64(section->size);
> +    }
> +
> +    bitmap_size = BITS_TO_LONGS(range.size >> TARGET_PAGE_BITS) *
> +                  sizeof(uint64_t);
> +
> +    range.bitmap = g_try_malloc0(bitmap_size);
> +    if (!range.bitmap) {
> +        error_report("%s: Error allocating bitmap buffer of size 0x%lx",
> +                     __func__, bitmap_size);
> +        return -ENOMEM;
> +    }
> +
> +    range.bitmap_size = bitmap_size;
> +
> +    ret = ioctl(container->fd, VFIO_IOMMU_GET_DIRTY_BITMAP, &range);
> +

From the implementation of the VFIO_IOMMU_GET_DIRTY_BITMAP ioctl, this
range.bitmap is indexed by iova, right? So if the vIOMMU is on, why can
cpu_physical_memory_set_dirty_lebitmap be called directly here without
any vIOMMU translation?
> +    if (!ret) {
> +        cpu_physical_memory_set_dirty_lebitmap((uint64_t *)range.bitmap,
> +                TARGET_PAGE_ALIGN(section->offset_within_address_space),
> +                bitmap_size >> TARGET_PAGE_BITS);
> +    } else {
> +        error_report("VFIO_IOMMU_GET_DIRTY_BITMAP: %d %d", ret, errno);
> +    }
> +
> +    trace_vfio_get_dirty_bitmap(container->fd, range.iova, range.size,
> +                                bitmap_size);
> +
> +    g_free(range.bitmap);
> +    return ret;
> +}
> +
> +static void vfio_listerner_log_sync(MemoryListener *listener,
> +                                    MemoryRegionSection *section)
> +{
> +    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
> +
> +    if (memory_region_is_ram_device(section->mr)) {
> +        return;
> +    }
> +

What about devices that need to sync the dirty bitmap while in the
RUNNING and SAVING state?

> +    if (vfio_devices_are_stopped_and_saving()) {
> +
> +        vfio_get_dirty_bitmap(container, section);
> +    }
> +}
> +

When the vIOMMU is on, the address space registered for this
MemoryListener is a VTDAddressSpace. In that address space,
listener->log_sync(listener, &mrs) would not be called, because the
dirty_log_mask is lacking. If listener->log_sync still needs to be
called, some special handling is required.

>  static const MemoryListener vfio_memory_listener = {
>      .region_add = vfio_listener_region_add,
>      .region_del = vfio_listener_region_del,
> +    .log_sync = vfio_listerner_log_sync,
>  };

Thanks
Yan

>  static void vfio_listener_release(VFIOContainer *container)
> diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
> index ac065b559f4e..0dd1f2ffe648 100644
> --- a/hw/vfio/trace-events
> +++ b/hw/vfio/trace-events
> @@ -160,3 +160,4 @@ vfio_save_complete_precopy(char *name) " (%s)"
>  vfio_load_device_config_state(char *name) " (%s)"
>  vfio_load_state(char *name, uint64_t data) " (%s) data 0x%"PRIx64
>  vfio_load_state_device_data(char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
> +vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64
> --
> 2.7.0
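To make the question concrete, here is a minimal sketch of the kind of
translation being asked about, at the point where the bitmap is
consumed. It assumes iotlb is hoisted to function scope and that
IOMMUTLBEntry.translated_addr holds the guest-physical address backing
the mapping; it is a sketch against the patch as posted, not the
series' eventual fix.

    /*
     * The bitmap returned by VFIO_IOMMU_GET_DIRTY_BITMAP is indexed
     * from range.iova, so with a vIOMMU the dirty bits have to be
     * applied at the translated (guest-physical) address rather than
     * at the IOVA-side section offset.
     */
    if (!ret) {
        ram_addr_t start;

        if (memory_region_is_iommu(section->mr)) {
            /* iotlb was fetched via address_space_get_iotlb_entry() */
            start = iotlb.translated_addr;
        } else {
            start = TARGET_PAGE_ALIGN(section->offset_within_address_space);
        }

        /* Page count derived from the queried range, one bit per page */
        cpu_physical_memory_set_dirty_lebitmap((uint64_t *)range.bitmap,
                                               start,
                                               range.size >> TARGET_PAGE_BITS);
    }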
On Tue, 12 Nov 2019 22:35:22 +0530
Kirti Wankhede <kwankhede@nvidia.com> wrote:

> vfio_listener_log_sync gets list of dirty pages from container using
> VFIO_IOMMU_GET_DIRTY_BITMAP ioctl and mark those pages dirty when all
> devices are stopped and saving state.
> Return early for the RAM block section of mapped MMIO region.
>
> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
> Reviewed-by: Neo Jia <cjia@nvidia.com>
> ---
>  hw/vfio/common.c     | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  hw/vfio/trace-events |   1 +
>  2 files changed, 104 insertions(+)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index ade9839c28a3..66f1c64bf074 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -29,6 +29,7 @@
>  #include "hw/vfio/vfio.h"
>  #include "exec/address-spaces.h"
>  #include "exec/memory.h"
> +#include "exec/ram_addr.h"
>  #include "hw/hw.h"
>  #include "qemu/error-report.h"
>  #include "qemu/main-loop.h"
> @@ -38,6 +39,7 @@
>  #include "sysemu/reset.h"
>  #include "trace.h"
>  #include "qapi/error.h"
> +#include "migration/migration.h"
>
>  VFIOGroupList vfio_group_list =
>      QLIST_HEAD_INITIALIZER(vfio_group_list);
> @@ -288,6 +290,28 @@ const MemoryRegionOps vfio_region_ops = {
>  };
>
>  /*
> + * Device state interfaces
> + */
> +
> +static bool vfio_devices_are_stopped_and_saving(void)
> +{
> +    VFIOGroup *group;
> +    VFIODevice *vbasedev;
> +
> +    QLIST_FOREACH(group, &vfio_group_list, next) {
> +        QLIST_FOREACH(vbasedev, &group->device_list, next) {
> +            if ((vbasedev->device_state & VFIO_DEVICE_STATE_SAVING) &&
> +                !(vbasedev->device_state & VFIO_DEVICE_STATE_RUNNING)) {

(device_state & MASK) == SAVING

> +                continue;

Kind of silly to have a continue rather than just changing the polarity
of the test so that we only branch into the return case.

> +            } else {
> +                return false;
> +            }
> +        }
> +    }
> +    return true;
> +}
> +
> +/*
>   * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
>   */
>  static int vfio_dma_unmap(VFIOContainer *container,
> @@ -813,9 +837,88 @@ static void vfio_listener_region_del(MemoryListener *listener,
>      }
>  }
>
> +static int vfio_get_dirty_bitmap(VFIOContainer *container,
> +                                 MemoryRegionSection *section)
> +{
> +    struct vfio_iommu_type1_dirty_bitmap range;
> +    uint64_t bitmap_size;
> +    int ret;
> +
> +    range.argsz = sizeof(range);
> +
> +    if (memory_region_is_iommu(section->mr)) {
> +        VFIOGuestIOMMU *giommu;
> +        IOMMUTLBEntry iotlb;
> +
> +        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
> +            if (MEMORY_REGION(giommu->iommu) == section->mr &&
> +                giommu->n.start == section->offset_within_region) {
> +                break;
> +            }
> +        }
> +
> +        if (!giommu) {
> +            return -EINVAL;
> +        }
> +
> +        iotlb = address_space_get_iotlb_entry(container->space->as,
> +                    TARGET_PAGE_ALIGN(section->offset_within_address_space),
> +                    true, MEMTXATTRS_UNSPECIFIED);
> +        range.iova = iotlb.iova + giommu->iommu_offset;
> +        range.size = iotlb.addr_mask + 1;
> +    } else {
> +        range.iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
> +        range.size = int128_get64(section->size);
> +    }
> +
> +    bitmap_size = BITS_TO_LONGS(range.size >> TARGET_PAGE_BITS) *
> +                  sizeof(uint64_t);
> +
> +    range.bitmap = g_try_malloc0(bitmap_size);
> +    if (!range.bitmap) {
> +        error_report("%s: Error allocating bitmap buffer of size 0x%lx",
> +                     __func__, bitmap_size);
> +        return -ENOMEM;

We could certainly iterate with a smaller bitmap rather than use a
single ioctl. This doesn't seem like it scales well as VM memory size
increases.
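One possible shape for that chunked approach: a sketch against the
vfio_iommu_type1_dirty_bitmap UAPI proposed by this series, where
DIRTY_BITMAP_CHUNK and the helper name are hypothetical and the caller
is assumed to pass an already-translated ram_addr.

    /* Hypothetical cap on guest memory queried per ioctl: 256MB */
    #define DIRTY_BITMAP_CHUNK (256ULL * 1024 * 1024)

    static int vfio_get_dirty_bitmap_chunked(VFIOContainer *container,
                                             uint64_t iova, uint64_t size,
                                             ram_addr_t ram_addr)
    {
        uint64_t done;

        for (done = 0; done < size; done += DIRTY_BITMAP_CHUNK) {
            struct vfio_iommu_type1_dirty_bitmap range = {
                .argsz = sizeof(range),
                .iova = iova + done,
                .size = MIN(DIRTY_BITMAP_CHUNK, size - done),
            };
            /* Allocation is bounded by the chunk size, not the VM size */
            uint64_t bitmap_size = BITS_TO_LONGS(range.size >> TARGET_PAGE_BITS) *
                                   sizeof(uint64_t);
            int ret;

            range.bitmap = g_try_malloc0(bitmap_size);
            if (!range.bitmap) {
                return -ENOMEM;
            }
            range.bitmap_size = bitmap_size;

            ret = ioctl(container->fd, VFIO_IOMMU_GET_DIRTY_BITMAP, &range);
            if (!ret) {
                cpu_physical_memory_set_dirty_lebitmap((uint64_t *)range.bitmap,
                                                       ram_addr + done,
                                                       range.size >> TARGET_PAGE_BITS);
            }
            g_free(range.bitmap);
            if (ret) {
                return ret;
            }
        }
        return 0;
    }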
> +    }
> +
> +    range.bitmap_size = bitmap_size;
> +
> +    ret = ioctl(container->fd, VFIO_IOMMU_GET_DIRTY_BITMAP, &range);
> +
> +    if (!ret) {
> +        cpu_physical_memory_set_dirty_lebitmap((uint64_t *)range.bitmap,
> +                TARGET_PAGE_ALIGN(section->offset_within_address_space),
> +                bitmap_size >> TARGET_PAGE_BITS);

Like Yan, I think this is relative to the iova address space and needs
a translation for the vIOMMU case.

> +    } else {
> +        error_report("VFIO_IOMMU_GET_DIRTY_BITMAP: %d %d", ret, errno);
> +    }
> +
> +    trace_vfio_get_dirty_bitmap(container->fd, range.iova, range.size,
> +                                bitmap_size);
> +
> +    g_free(range.bitmap);
> +    return ret;
> +}
> +
> +static void vfio_listerner_log_sync(MemoryListener *listener,
> +                                    MemoryRegionSection *section)
> +{
> +    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
> +
> +    if (memory_region_is_ram_device(section->mr)) {
> +        return;
> +    }
> +
> +    if (vfio_devices_are_stopped_and_saving()) {

I think a comment is necessary here indicating why we're not
participating in an iterative dirty bitmap sync. Additionally, how will
the kernel indicate that we can support real dirty tracking?

> +
> +        vfio_get_dirty_bitmap(container, section);
> +    }
> +}
> +
>  static const MemoryListener vfio_memory_listener = {
>      .region_add = vfio_listener_region_add,
>      .region_del = vfio_listener_region_del,
> +    .log_sync = vfio_listerner_log_sync,
>  };
>
>  static void vfio_listener_release(VFIOContainer *container)
> diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
> index ac065b559f4e..0dd1f2ffe648 100644
> --- a/hw/vfio/trace-events
> +++ b/hw/vfio/trace-events
> @@ -160,3 +160,4 @@ vfio_save_complete_precopy(char *name) " (%s)"
>  vfio_load_device_config_state(char *name) " (%s)"
>  vfio_load_state(char *name, uint64_t data) " (%s) data 0x%"PRIx64
>  vfio_load_state_device_data(char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
> +vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64
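Putting the two device-state comments above together, the helper could
collapse to something like the following sketch. The mask is spelled
out from the two bits the patch already uses, since the series as
posted does not define a combined state mask.

    static bool vfio_devices_are_stopped_and_saving(void)
    {
        VFIOGroup *group;
        VFIODevice *vbasedev;

        QLIST_FOREACH(group, &vfio_group_list, next) {
            QLIST_FOREACH(vbasedev, &group->device_list, next) {
                /*
                 * Inverted polarity: return as soon as any device is
                 * not in the SAVING-and-not-RUNNING state.
                 */
                if ((vbasedev->device_state &
                     (VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RUNNING)) !=
                    VFIO_DEVICE_STATE_SAVING) {
                    return false;
                }
            }
        }
        return true;
    }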