diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c
@@ -31,6 +31,9 @@ struct VhostIOVATree {
/* Allocated IOVA addresses */
IOVATree *iova_map;
+
+    /* GPA->IOVA address memory maps */
+ IOVATree *gpa_iova_map;
};
/**
@@ -48,6 +51,7 @@ VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last)
tree->iova_taddr_map = iova_tree_new();
tree->iova_map = iova_tree_new();
+ tree->gpa_iova_map = gpa_tree_new();
return tree;
}
@@ -58,6 +62,7 @@ void vhost_iova_tree_delete(VhostIOVATree *iova_tree)
{
iova_tree_destroy(iova_tree->iova_taddr_map);
iova_tree_destroy(iova_tree->iova_map);
+ iova_tree_destroy(iova_tree->gpa_iova_map);
g_free(iova_tree);
}
@@ -134,3 +139,48 @@ int vhost_iova_tree_insert(VhostIOVATree *iova_tree, DMAMap *map)
return iova_tree_insert(iova_tree->iova_taddr_map, map);
}
+
+/** Insert a new GPA->IOVA mapping into the GPA->IOVA tree
+ *
+ * @iova_tree: The VhostIOVATree
+ * @map: The GPA->IOVA mapping
+ *
+ * Returns:
+ * - IOVA_OK if the map fits in the container
+ * - IOVA_ERR_INVALID if the map does not make sense (e.g. size overflow)
+ * - IOVA_ERR_OVERLAP if the GPA range overlaps with an existing range
+ */
+int vhost_iova_tree_insert_gpa(VhostIOVATree *iova_tree, DMAMap *map)
+{
+ if (map->iova + map->size < map->iova || map->perm == IOMMU_NONE) {
+ return IOVA_ERR_INVALID;
+ }
+
+ return gpa_tree_insert(iova_tree->gpa_iova_map, map);
+}
+
+/**
+ * Find the IOVA address stored for a guest physical address (GPA)
+ *
+ * @tree: The VhostIOVATree
+ * @map: The map with the guest memory address
+ *
+ * Returns the stored GPA->IOVA mapping, or NULL if not found.
+ */
+const DMAMap *vhost_iova_tree_find_gpa(const VhostIOVATree *tree,
+ const DMAMap *map)
+{
+ return iova_tree_find_iova(tree->gpa_iova_map, map);
+}
+
+/**
+ * Remove existing mappings from the GPA->IOVA and IOVA-only trees
+ *
+ * @iova_tree: The VhostIOVATree
+ * @map: The guest memory address map to remove
+ */
+void vhost_iova_tree_remove_gpa(VhostIOVATree *iova_tree, DMAMap map)
+{
+ iova_tree_remove(iova_tree->gpa_iova_map, map);
+ iova_tree_remove(iova_tree->iova_map, map);
+}
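
To make the intended calling convention concrete, here is a minimal sketch of
a caller; example_map_guest_region() and its parameters are hypothetical, but
the alloc-then-insert ordering and the cleanup on failure mirror what
vhost_vdpa_listener_region_add() does below:

#include "qemu/osdep.h"
#include "hw/virtio/vhost-iova-tree.h"

/*
 * Hypothetical caller, for illustration only: reserve an IOVA range for
 * a guest memory section, then record the GPA->IOVA pairing so that a
 * later deletion can look the range up by GPA alone.
 */
static int example_map_guest_region(VhostIOVATree *tree, hwaddr gpa,
                                    hwaddr size, IOMMUAccessFlags perm)
{
    DMAMap map = {
        .size = size - 1, /* DMAMap ranges are inclusive */
        .perm = perm,
    };
    int r;

    /* Reserve an IOVA range first; map.iova is filled in on success */
    r = vhost_iova_tree_map_alloc(tree, &map);
    if (unlikely(r != IOVA_OK)) {
        return r;
    }

    /* Record the GPA->IOVA pairing, keyed by guest physical address */
    map.translated_addr = gpa;
    r = vhost_iova_tree_insert_gpa(tree, &map);
    if (unlikely(r != IOVA_OK)) {
        /* Frees the IOVA allocation; the GPA-tree removal is a no-op */
        vhost_iova_tree_remove_gpa(tree, map);
    }

    return r;
}
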
diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h
@@ -24,5 +24,9 @@ const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree,
int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map);
void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map);
int vhost_iova_tree_insert(VhostIOVATree *iova_tree, DMAMap *map);
+int vhost_iova_tree_insert_gpa(VhostIOVATree *iova_tree, DMAMap *map);
+const DMAMap *vhost_iova_tree_find_gpa(const VhostIOVATree *iova_tree,
+ const DMAMap *map);
+void vhost_iova_tree_remove_gpa(VhostIOVATree *iova_tree, DMAMap map);
#endif
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
@@ -361,10 +361,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
if (s->shadow_data) {
int r;
- mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
mem_region.size = int128_get64(llsize) - 1,
mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly),
+ /* Allocate an IOVA range in the IOVA tree */
r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region);
if (unlikely(r != IOVA_OK)) {
error_report("Can't allocate a mapping (%d)", r);
@@ -372,6 +372,14 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
}
iova = mem_region.iova;
+ mem_region.translated_addr = section->offset_within_address_space;
+
+ /* Add GPA->IOVA mapping to the GPA->IOVA tree */
+ r = vhost_iova_tree_insert_gpa(s->iova_tree, &mem_region);
+ if (unlikely(r != IOVA_OK)) {
+ error_report("Can't add listener region mapping (%d)", r);
+ goto fail_map;
+ }
}
vhost_vdpa_iotlb_batch_begin_once(s);
@@ -386,7 +394,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
fail_map:
if (s->shadow_data) {
- vhost_iova_tree_remove(s->iova_tree, mem_region);
+ vhost_iova_tree_remove_gpa(s->iova_tree, mem_region);
}
fail:
@@ -440,21 +448,19 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
if (s->shadow_data) {
const DMAMap *result;
- const void *vaddr = memory_region_get_ram_ptr(section->mr) +
- section->offset_within_region +
- (iova - section->offset_within_address_space);
DMAMap mem_region = {
- .translated_addr = (hwaddr)(uintptr_t)vaddr,
+ .translated_addr = section->offset_within_address_space,
.size = int128_get64(llsize) - 1,
};
- result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region);
+ /* Search the GPA->IOVA tree */
+ result = vhost_iova_tree_find_gpa(s->iova_tree, &mem_region);
if (!result) {
/* The memory listener map wasn't mapped */
return;
}
iova = result->iova;
- vhost_iova_tree_remove(s->iova_tree, *result);
+ vhost_iova_tree_remove_gpa(s->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(s);
/*
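
The del side can be summarized by a similar hypothetical helper: the needle's
translated_addr now carries the section's GPA (offset_within_address_space)
rather than a host vaddr, so the lookup is served by the GPA->IOVA tree:

/* Hypothetical mirror of the del path above, for illustration only */
static void example_unmap_guest_region(VhostIOVATree *tree, hwaddr gpa,
                                       hwaddr size)
{
    const DMAMap *result;
    DMAMap needle = {
        .translated_addr = gpa, /* a GPA, not an HVA, after this patch */
        .size = size - 1,       /* DMAMap ranges are inclusive */
    };

    result = vhost_iova_tree_find_gpa(tree, &needle);
    if (!result) {
        return; /* the listener never mapped this section */
    }

    /* result->iova is the range the device was programmed with */
    vhost_iova_tree_remove_gpa(tree, *result);
}
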
diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h
@@ -40,6 +40,15 @@ typedef struct DMAMap {
} QEMU_PACKED DMAMap;
typedef gboolean (*iova_tree_iterator)(DMAMap *map);
+/**
+ * gpa_tree_new:
+ *
+ * Create a new GPA->IOVA tree.
+ *
+ * Returns: the tree pointer on success, or NULL otherwise.
+ */
+IOVATree *gpa_tree_new(void);
+
/**
* iova_tree_new:
*
@@ -49,6 +58,19 @@ typedef gboolean (*iova_tree_iterator)(DMAMap *map);
*/
IOVATree *iova_tree_new(void);
+/**
+ * gpa_tree_insert:
+ *
+ * @tree: The GPA->IOVA tree we're inserting the mapping to
+ * @map: The GPA->IOVA mapping to insert
+ *
+ * Inserts a GPA range into the GPA->IOVA tree. If the new range overlaps
+ * an existing one, IOVA_ERR_OVERLAP is returned.
+ *
+ * Return: 0 if successful, < 0 otherwise.
+ */
+int gpa_tree_insert(IOVATree *tree, const DMAMap *map);
+
/**
* iova_tree_insert:
*
diff --git a/util/iova-tree.c b/util/iova-tree.c
@@ -55,6 +55,22 @@ static void iova_tree_alloc_args_iterate(struct IOVATreeAllocArgs *args,
args->this = next;
}
+static int gpa_tree_compare(gconstpointer a, gconstpointer b, gpointer data)
+{
+ const DMAMap *m1 = a, *m2 = b;
+
+ if (m1->translated_addr > m2->translated_addr + m2->size) {
+ return 1;
+ }
+
+ if (m1->translated_addr + m1->size < m2->translated_addr) {
+ return -1;
+ }
+
+ /* Overlapped */
+ return 0;
+}
+
static int iova_tree_compare(gconstpointer a, gconstpointer b, gpointer data)
{
const DMAMap *m1 = a, *m2 = b;
@@ -71,6 +87,15 @@ static int iova_tree_compare(gconstpointer a, gconstpointer b, gpointer data)
return 0;
}
+IOVATree *gpa_tree_new(void)
+{
+ IOVATree *gpa_tree = g_new0(IOVATree, 1);
+
+ gpa_tree->tree = g_tree_new_full(gpa_tree_compare, NULL, g_free, NULL);
+
+ return gpa_tree;
+}
+
IOVATree *iova_tree_new(void)
{
IOVATree *iova_tree = g_new0(IOVATree, 1);
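
One note on the destroy notifiers in gpa_tree_new(), mirroring
iova_tree_new(): each DMAMap is inserted with itself as both key and value
(see iova_tree_insert_internal() below), so g_free is set on the key slot
only; a value destroy function would double-free the same pointer. A
standalone GLib sketch of that ownership pattern, with hypothetical types
and values:

#include <glib.h>

/* Hypothetical range type, for illustration of the key == value pattern */
typedef struct {
    guint64 addr;
    guint64 size;
} ExampleRange;

static gint example_cmp(gconstpointer a, gconstpointer b, gpointer data)
{
    const ExampleRange *r1 = a, *r2 = b;

    if (r1->addr > r2->addr + r2->size) {
        return 1;
    }
    if (r1->addr + r1->size < r2->addr) {
        return -1;
    }
    return 0; /* overlapping ranges compare as equal */
}

int main(void)
{
    /* g_free on the key only: key and value alias the same allocation */
    GTree *tree = g_tree_new_full(example_cmp, NULL, g_free, NULL);
    ExampleRange *r = g_new0(ExampleRange, 1);

    r->addr = 0x1000; /* hypothetical range [0x1000, 0x1fff] */
    r->size = 0xfff;
    g_tree_insert(tree, r, r);

    g_tree_destroy(tree); /* frees each range exactly once, via the key */
    return 0;
}
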
@@ -121,6 +146,27 @@ static inline void iova_tree_insert_internal(GTree *gtree, DMAMap *range)
g_tree_insert(gtree, range, range);
}
+int gpa_tree_insert(IOVATree *tree, const DMAMap *map)
+{
+ DMAMap *new;
+
+ if (map->translated_addr + map->size < map->translated_addr ||
+ map->perm == IOMMU_NONE) {
+ return IOVA_ERR_INVALID;
+ }
+
+ /* We don't allow inserting ranges that overlap with existing ones */
+    if (iova_tree_find(tree, map)) {
+ return IOVA_ERR_OVERLAP;
+ }
+
+ new = g_new0(DMAMap, 1);
+ memcpy(new, map, sizeof(*new));
+ iova_tree_insert_internal(tree->tree, new);
+
+ return IOVA_OK;
+}
+
int iova_tree_insert(IOVATree *tree, const DMAMap *map)
{
DMAMap *new;
Implements the GPA->IOVA tree for handling the mapping and unmapping of
guest memory. This, alongside the SVQ IOVA->HVA tree and the IOVA-only
tree implemented in the previous patches, allows us to handle guest and
host-only memory mapping operations separately, via their own respective
trees.

The next patches will implement a method to determine whether an incoming
address for translation is backed by guest or host-only memory.

Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
---
 hw/virtio/vhost-iova-tree.c | 50 +++++++++++++++++++++++++++++++++++++
 hw/virtio/vhost-iova-tree.h |  4 +++
 hw/virtio/vhost-vdpa.c      | 22 ++++++++++------
 include/qemu/iova-tree.h    | 22 ++++++++++++++++
 util/iova-tree.c            | 46 ++++++++++++++++++++++++++++++++++
 5 files changed, 136 insertions(+), 8 deletions(-)