@@ -32,6 +32,8 @@ typedef struct VhostVDPAHostNotifier {
 
 /* Info shared by all vhost_vdpa device models */
 typedef struct vhost_vdpa_shared {
+    struct vhost_vdpa_iova_range iova_range;
+
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
 } VhostVDPAShared;
@@ -43,7 +45,6 @@ typedef struct vhost_vdpa {
     bool iotlb_batch_begin_sent;
     uint32_t address_space_id;
     MemoryListener listener;
-    struct vhost_vdpa_iova_range iova_range;
     uint64_t acked_features;
     bool shadow_vqs_enabled;
     /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
@@ -114,7 +114,8 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
                    strerror(-ret));
         goto free_vqs;
     }
-    v->vdpa.iova_range = iova_range;
+    v->vdpa.shared = g_new0(VhostVDPAShared, 1);
+    v->vdpa.shared->iova_range = iova_range;
 
     ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL);
     if (ret < 0) {
@@ -162,6 +163,7 @@ vhost_cleanup:
     vhost_dev_cleanup(&v->dev);
 free_vqs:
     g_free(vqs);
+    g_free(v->vdpa.shared);
 out:
     qemu_close(v->vhostfd);
     v->vhostfd = -1;
@@ -184,6 +186,7 @@ static void vhost_vdpa_device_unrealize(DeviceState *dev)
     g_free(s->config);
     g_free(s->dev.vqs);
     vhost_dev_cleanup(&s->dev);
+    g_free(s->vdpa.shared);
     qemu_close(s->vhostfd);
     s->vhostfd = -1;
 }
@@ -213,10 +213,10 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     RCU_READ_LOCK_GUARD();
     /* check if RAM section out of device range */
     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
-    if (int128_gt(llend, int128_make64(v->iova_range.last))) {
+    if (int128_gt(llend, int128_make64(v->shared->iova_range.last))) {
         error_report("RAM section out of device range (max=0x%" PRIx64
                      ", end addr=0x%" PRIx64 ")",
-                     v->iova_range.last, int128_get64(llend));
+                     v->shared->iova_range.last, int128_get64(llend));
         return;
     }
 
@@ -316,8 +316,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
     int page_size = qemu_target_page_size();
     int page_mask = -page_size;
 
-    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
-                                            v->iova_range.last, page_mask)) {
+    if (vhost_vdpa_listener_skipped_section(section,
+                                            v->shared->iova_range.first,
+                                            v->shared->iova_range.last,
+                                            page_mask)) {
         return;
     }
     if (memory_region_is_iommu(section->mr)) {
@@ -403,8 +405,10 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
     int page_size = qemu_target_page_size();
     int page_mask = -page_size;
 
-    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
-                                            v->iova_range.last, page_mask)) {
+    if (vhost_vdpa_listener_skipped_section(section,
+                                            v->shared->iova_range.first,
+                                            v->shared->iova_range.last,
+                                            page_mask)) {
         return;
     }
     if (memory_region_is_iommu(section->mr)) {
@@ -354,8 +354,8 @@ static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
     migration_add_notifier(&s->migration_state,
                            vdpa_net_migration_state_notifier);
     if (v->shadow_vqs_enabled) {
-        v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
-                                                   v->iova_range.last);
+        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
+                                                   v->shared->iova_range.last);
     }
 }
 
@@ -591,8 +591,8 @@ out:
      * and it is not worth it for the moment.
      */
     if (!v->shared->iova_tree) {
-        v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
-                                                   v->iova_range.last);
+        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
+                                                   v->shared->iova_range.last);
     }
 
     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
@@ -1688,12 +1688,12 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->always_svq = svq;
     s->migration_state.notify = NULL;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
-    s->vhost_vdpa.iova_range = iova_range;
     s->vhost_vdpa.shadow_data = svq;
     if (queue_pair_index == 0) {
         vhost_vdpa_net_valid_svq_features(features,
                                           &s->vhost_vdpa.migration_blocker);
         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
+        s->vhost_vdpa.shared->iova_range = iova_range;
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,