--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -30,6 +30,10 @@ typedef struct VhostVDPAHostNotifier {
void *addr;
} VhostVDPAHostNotifier;

+/* Info shared by all vhost_vdpa device models */
+typedef struct vhost_vdpa_shared {
+} VhostVDPAShared;
+
typedef struct vhost_vdpa {
int device_fd;
int index;
@@ -46,6 +50,7 @@ typedef struct vhost_vdpa {
bool suspended;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
+ VhostVDPAShared *shared;
GPtrArray *shadow_vqs;
const VhostShadowVirtqueueOps *shadow_vq_ops;
void *shadow_vq_ops_opaque;
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -238,6 +238,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
qemu_close(s->vhost_vdpa.device_fd);
s->vhost_vdpa.device_fd = -1;
}
+ if (s->vhost_vdpa.index != 0) {
+ return;
+ }
+ g_free(s->vhost_vdpa.shared);
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
@@ -1428,6 +1432,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
bool svq,
struct vhost_vdpa_iova_range iova_range,
uint64_t features,
+ VhostVDPAShared *shared,
Error **errp)
{
NetClientState *nc = NULL;
@@ -1463,6 +1468,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
if (queue_pair_index == 0) {
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
+ s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
} else if (!is_datapath) {
s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
PROT_READ | PROT_WRITE,
@@ -1475,11 +1481,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.shadow_vq_ops_opaque = s;
s->cvq_isolated = cvq_isolated;
}
+ if (queue_pair_index != 0) {
+ s->vhost_vdpa.shared = shared;
+ }
+
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
qemu_del_net_client(nc);
return NULL;
}
+
return nc;
}

@@ -1591,17 +1602,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,

ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
for (i = 0; i < queue_pairs; i++) {
+ VhostVDPAShared *shared = NULL;
+
+ if (i) {
+ shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
+ }
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_range, features, errp);
+ iova_range, features, shared, errp);
if (!ncs[i])
goto err;
}

if (has_cvq) {
+ VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
+ VhostVDPAShared *shared = s0->vhost_vdpa.shared;
+
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_range, features, errp);
+ opts->x_svq, iova_range, features, shared,
+ errp);
if (!nc)
goto err;
}
It will hold properties shared among all vhost_vdpa instances associated
with the same device. For example, we just need one iova_tree or one
memory listener for the entire device.

Next patches will register the vhost_vdpa memory listener at the
beginning of the VM migration at the destination. This enables QEMU to
map the memory to the device before stopping the VM at the source,
instead of doing it while both source and destination are stopped, thus
minimizing the downtime.

However, the destination QEMU is unaware of which vhost_vdpa struct will
register its memory_listener. If the source guest has CVQ enabled, it
will be the one associated with the CVQ. Otherwise, it will be the first
one. Save the members related to memory operations in a common place,
rather than always in the first / last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost-vdpa.h |  5 +++++
 net/vhost-vdpa.c               | 24 ++++++++++++++++++++++--
 2 files changed, 27 insertions(+), 2 deletions(-)
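For readers following the ownership rule the diff establishes, here is a
minimal, self-contained sketch, not part of the patch: queue pair 0
allocates VhostVDPAShared, every other queue pair (and the CVQ) borrows
the same pointer, and only index 0 frees it. The trimmed structs, the
iova_tree placeholder field, and the helpers vdpa_init_one /
vdpa_cleanup_one are illustrative stand-ins, not the real QEMU code.

/*
 * Standalone sketch of the single-owner pattern above.
 * Build: gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>

typedef struct vhost_vdpa_shared {
    void *iova_tree;    /* placeholder for members later patches move here */
} VhostVDPAShared;

typedef struct vhost_vdpa {
    int index;                  /* queue pair index */
    VhostVDPAShared *shared;    /* owned only when index == 0 */
} VhostVDPA;

static void vdpa_init_one(VhostVDPA *v, int index, VhostVDPAShared *shared)
{
    v->index = index;
    /* The first queue pair allocates; every other instance reuses it */
    v->shared = (index == 0) ? g_new0(VhostVDPAShared, 1) : shared;
}

static void vdpa_cleanup_one(VhostVDPA *v)
{
    if (v->index != 0) {
        return;                 /* borrower: nothing to free */
    }
    g_free(v->shared);          /* owner frees exactly once */
}

int main(void)
{
    VhostVDPA qps[3];

    vdpa_init_one(&qps[0], 0, NULL);
    for (int i = 1; i < 3; i++) {
        vdpa_init_one(&qps[i], i, qps[0].shared);
    }
    for (int i = 2; i >= 0; i--) {
        vdpa_cleanup_one(&qps[i]);
    }
    return 0;
}

This single-owner rule is what lets vhost_vdpa_cleanup() in the diff
free the struct exactly once, regardless of how many queue pairs or a
CVQ were created.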