--- a/include/net/net.h
+++ b/include/net/net.h
@@ -42,6 +42,7 @@ typedef struct NICConf {

/* Net clients */

+struct NICState;
typedef void (NetPoll)(NetClientState *, bool enable);
typedef bool (NetCanReceive)(NetClientState *);
typedef int (NetStart)(NetClientState *);
@@ -69,6 +70,9 @@ typedef void (SocketReadStateFinalize)(SocketReadState *rs);
typedef void (NetAnnounce)(NetClientState *);
typedef bool (SetSteeringEBPF)(NetClientState *, int);
typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **);
+/* May run before start and pairing, so pass the NICState to get the peer */
+typedef int (NetMigrationLoadSetup)(NetClientState *, struct NICState *);
+typedef int (NetMigrationLoadCleanup)(NetClientState *, struct NICState *);

typedef struct NetClientInfo {
    NetClientDriver type;
@@ -98,6 +102,8 @@ typedef struct NetClientInfo {
    NetAnnounce *announce;
    SetSteeringEBPF *set_steering_ebpf;
    NetCheckPeerType *check_peer_type;
+    NetMigrationLoadSetup *load_setup;
+    NetMigrationLoadCleanup *load_cleanup;
} NetClientInfo;

struct NetClientState {
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -406,6 +406,37 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc)
    }
}

+static int vhost_vdpa_net_load_setup(NetClientState *nc, NICState *nic)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    VirtIONet *n = qemu_get_nic_opaque(&nic->ncs[0]);
+    VhostVDPAShared *shared = s->vhost_vdpa.shared;
+    int r;
+
+    if (s->always_svq) {
+        /* SVQ is in use from the start, so the IOVA tree is needed now */
+        shared->iova_tree = vhost_iova_tree_new(shared->iova_range.first,
+                                                shared->iova_range.last);
+    }
+
+    r = vhost_vdpa_load_setup(shared, n->parent_obj.dma_as);
+    if (unlikely(r < 0)) {
+        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
+                        vhost_iova_tree_delete);
+    }
+
+    return r;
+}
+
+static int vhost_vdpa_net_load_cleanup(NetClientState *nc, NICState *nic)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    VirtIONet *n = qemu_get_nic_opaque(&nic->ncs[0]);
+
+    return vhost_vdpa_load_cleanup(s->vhost_vdpa.shared,
+                          n->parent_obj.status & VIRTIO_CONFIG_S_DRIVER_OK);
+}
+
static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
@@ -418,6 +449,8 @@ static NetClientInfo net_vhost_vdpa_info = {
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
+    .load_setup = vhost_vdpa_net_load_setup,
+    .load_cleanup = vhost_vdpa_net_load_cleanup,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
So the vDPA backend knows when an incoming migration starts. The
NICState argument is needed so the backend can get the DMA address
space.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
RFC v2:
  * Solve git conflict with .set_steering_ebpf.
  * Fix the x-svq=on use case, which did not allocate the iova_tree.
---
 include/net/net.h |  6 ++++++
 net/vhost-vdpa.c  | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)
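For context, here is a minimal sketch of how a device model could drive the
new hooks from its migration load path. This is an illustration only, not
part of this patch: the function names example_net_load_setup()/
example_net_load_cleanup() and the exact call site are assumptions, while
qemu_get_queue(), nc->peer, and the SaveVMHandlers load_setup/load_cleanup
signatures are existing QEMU APIs.

/*
 * Hypothetical caller sketch (assumed names): a virtio-net style device
 * forwarding the migration load_setup/load_cleanup events to its peer
 * backend (e.g. vhost-vdpa).
 */
#include "qemu/osdep.h"
#include "net/net.h"
#include "hw/virtio/virtio-net.h"

/* Matches the SaveVMHandlers .load_setup signature */
static int example_net_load_setup(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;
    NetClientState *nc = qemu_get_queue(n->nic);

    /*
     * Tell the backend an incoming migration has started; the NICState
     * lets it reach the device's DMA address space.
     */
    if (nc->peer && nc->peer->info->load_setup) {
        return nc->peer->info->load_setup(nc->peer, n->nic);
    }

    return 0;
}

/* Matches the SaveVMHandlers .load_cleanup signature */
static int example_net_load_cleanup(void *opaque)
{
    VirtIONet *n = opaque;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (nc->peer && nc->peer->info->load_cleanup) {
        return nc->peer->info->load_cleanup(nc->peer, n->nic);
    }

    return 0;
}

The NICState parameter is the design point the commit message highlights:
the caller side only has its own NetClientState, but the backend needs the
NIC to reach the guest's DMA address space, as vhost_vdpa_net_load_setup()
does via qemu_get_nic_opaque() on nic->ncs[0].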