@@ -1398,6 +1398,69 @@ static int vfio_init_container(VFIOContainer *container, int group_fd,
     return 0;
 }
 
+static int vfio_get_iommu_info(VFIOContainer *container,
+                               struct vfio_iommu_type1_info **info)
+{
+
+    size_t argsz = sizeof(struct vfio_iommu_type1_info);
+
+    *info = g_new0(struct vfio_iommu_type1_info, 1);
+again:
+    (*info)->argsz = argsz;
+
+    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
+        g_free(*info);
+        *info = NULL;
+        return -errno;
+    }
+
+    if ((*info)->argsz > argsz) {
+        argsz = (*info)->argsz;
+        *info = g_realloc(*info, argsz);
+        goto again;
+    }
+
+    return 0;
+}
+
+static struct vfio_info_cap_header *
+vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
+{
+    struct vfio_info_cap_header *hdr;
+    void *ptr = info;
+
+    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+        return NULL;
+    }
+
+    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+        if (hdr->id == id) {
+            return hdr;
+        }
+    }
+
+    return NULL;
+}
+
+static void vfio_get_iommu_info_migration(VFIOContainer *container,
+                                          struct vfio_iommu_type1_info *info)
+{
+    struct vfio_info_cap_header *hdr;
+    struct vfio_iommu_type1_info_cap_migration *cap_mig;
+
+    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
+    if (!hdr) {
+        return;
+    }
+
+    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
+                           header);
+
+    if (cap_mig->flags & VFIO_IOMMU_INFO_CAPS_MIGRATION_DIRTY_PAGE_TRACK) {
+        container->dirty_pages_supported = true;
+    }
+}
+
 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                   Error **errp)
 {
@@ -1462,6 +1525,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     container->space = space;
     container->fd = fd;
     container->error = NULL;
+    container->dirty_pages_supported = false;
     QLIST_INIT(&container->giommu_list);
     QLIST_INIT(&container->hostwin_list);
 
@@ -1474,7 +1538,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     case VFIO_TYPE1v2_IOMMU:
     case VFIO_TYPE1_IOMMU:
     {
-        struct vfio_iommu_type1_info info;
+        struct vfio_iommu_type1_info *info;
 
         /*
          * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
@@ -1483,15 +1547,20 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
          * existing Type1 IOMMUs generally support any IOVA we're
          * going to actually try in practice.
          */
-        info.argsz = sizeof(info);
-        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
-        /* Ignore errors */
-        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
+        ret = vfio_get_iommu_info(container, &info);
+        if (ret) {
+            goto free_container_exit;
+        }
+
+        if (!(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
             /* Assume 4k IOVA page size */
-            info.iova_pgsizes = 4096;
+            info->iova_pgsizes = 4096;
         }
-        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
-        container->pgsizes = info.iova_pgsizes;
+        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
+        container->pgsizes = info->iova_pgsizes;
+
+        vfio_get_iommu_info_migration(container, info);
+        g_free(info);
         break;
     }
     case VFIO_SPAPR_TCE_v2_IOMMU:
@@ -79,6 +79,7 @@ typedef struct VFIOContainer {
     unsigned iommu_type;
     Error *error;
     bool initialized;
+    bool dirty_pages_supported;
     unsigned long pgsizes;
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
     QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
Added helper functions to fetch the VFIO IOMMU info and to look up entries
in its capability chain. Added a function that reads the migration capability
flags from that chain and records whether the IOMMU container supports dirty
page tracking.

A similar change was proposed earlier:
https://lists.gnu.org/archive/html/qemu-devel/2018-05/msg03759.html

Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Cc: Eric Auger <eric.auger@redhat.com>
---
 hw/vfio/common.c              | 85 +++++++++++++++++++++++++++++++++++++++----
 include/hw/vfio/vfio-common.h |  1 +
 2 files changed, 78 insertions(+), 8 deletions(-)
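For context, the pattern the new helpers implement can be exercised from any
user-space VFIO client: call VFIO_IOMMU_GET_INFO, grow the buffer until the
kernel's reported argsz fits, then walk the capability chain looking for the
migration capability. Below is a minimal, standalone sketch (not part of the
patch) of that flow, using plain malloc/realloc instead of glib. The function
name container_supports_dirty_tracking() and the container_fd parameter are
illustrative only, and the sketch assumes a linux/vfio.h that already carries
the migration capability uAPI this series targets
(VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION and
VFIO_IOMMU_INFO_CAPS_MIGRATION_DIRTY_PAGE_TRACK); older headers will not have
those definitions.

/*
 * Illustrative only: mirrors vfio_get_iommu_info()/vfio_get_iommu_info_cap()
 * above without the QEMU/glib dependencies.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static bool container_supports_dirty_tracking(int container_fd)
{
    struct vfio_iommu_type1_info *info, *bigger;
    size_t argsz = sizeof(*info);
    bool supported = false;
    uint32_t off;

    info = calloc(1, argsz);
    if (!info) {
        return false;
    }

    /*
     * The kernel reports the size it really needs (fixed struct plus the
     * capability chain) in info->argsz; grow the buffer and retry until the
     * whole reply fits.
     */
    for (;;) {
        info->argsz = argsz;
        if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
            goto out;
        }
        if (info->argsz <= argsz) {
            break;
        }
        argsz = info->argsz;
        bigger = realloc(info, argsz);
        if (!bigger) {
            goto out;
        }
        info = bigger;
    }

    /* Without VFIO_IOMMU_INFO_CAPS there is no capability chain to walk. */
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        goto out;
    }

    /* Offsets are relative to the start of info; next == 0 ends the chain. */
    for (off = info->cap_offset; off; off = ((struct vfio_info_cap_header *)
                                             ((char *)info + off))->next) {
        struct vfio_info_cap_header *hdr =
            (struct vfio_info_cap_header *)((char *)info + off);

        if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION) {
            struct vfio_iommu_type1_info_cap_migration *mig =
                (struct vfio_iommu_type1_info_cap_migration *)hdr;

            supported = !!(mig->flags &
                           VFIO_IOMMU_INFO_CAPS_MIGRATION_DIRTY_PAGE_TRACK);
            break;
        }
    }

out:
    free(info);
    return supported;
}

The re-query loop is also why the patch switches vfio_connect_container() from
a fixed struct vfio_iommu_type1_info on the stack to a heap allocation: the
capability chain is variable length, and only the kernel knows how large the
reply needs to be.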