--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -36,6 +36,9 @@ typedef struct vhost_vdpa_shared {
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
+
+ /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
+ bool shadow_data;
} VhostVDPAShared;

typedef struct vhost_vdpa {
@@ -47,8 +50,6 @@ typedef struct vhost_vdpa {
MemoryListener listener;
uint64_t acked_features;
bool shadow_vqs_enabled;
- /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
- bool shadow_data;
/* Device suspended successfully */
bool suspended;
VhostVDPAShared *shared;
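
With the flag in VhostVDPAShared, every queue pair's struct vhost_vdpa reaches the same device-wide value through its shared pointer instead of carrying a private copy that has to be kept in sync. A minimal sketch of the resulting access pattern (the helper name is hypothetical, not part of the patch; the fields are the ones declared above):

/* Illustrative only: one VhostVDPAShared is allocated per device and
 * shared by the vhost_vdpa instances of all its queue pairs, so any
 * of them can query the SVQ translation mode directly.
 */
static bool vhost_vdpa_has_shadow_data(const struct vhost_vdpa *v)
{
    return v->shared->shadow_data;
}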
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -353,7 +353,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
vaddr, section->readonly);
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_data) {
+ if (v->shared->shadow_data) {
int r;
mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
@@ -380,7 +380,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
return;

fail_map:
- if (v->shadow_data) {
+ if (v->shared->shadow_data) {
vhost_iova_tree_remove(v->shared->iova_tree, mem_region);
}
@@ -435,7 +435,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_data) {
+ if (v->shared->shadow_data) {
const DMAMap *result;
const void *vaddr = memory_region_get_ram_ptr(section->mr) +
section->offset_within_region +
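
In both listener paths the flag gates the extra GPA-to-IOVA translation step: when shadow_data is set, shadow virtqueue addresses are the IOTLB keys, so the section must get an IOVA allocated in the shared tree before the DMA map call (and removed again on failure, as the fail_map hunk shows). A hedged sketch of the map side, using the fields visible above with error handling condensed:

/* Sketch, assuming the DMAMap/IOVA-tree API used elsewhere in the
 * patch (vhost_iova_tree_map_alloc returns IOVA_OK on success):
 */
DMAMap mem_region = {
    .translated_addr = (hwaddr)(uintptr_t)vaddr,
    .size = int128_get64(llsize) - 1,
    .perm = IOMMU_ACCESS_FLAG(true, section->readonly),
};

if (v->shared->shadow_data) {
    /* Allocate a free IOVA; it becomes the key the device sees */
    int r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &mem_region);
    if (unlikely(r != IOVA_OK)) {
        goto fail; /* nothing to undo in the tree yet */
    }
    iova = mem_region.iova;
}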
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -290,15 +290,6 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
return size;
}

-/** From any vdpa net client, get the netclient of the first queue pair */
-static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
-{
- NICState *nic = qemu_get_nic(s->nc.peer);
- NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
-
- return DO_UPCAST(VhostVDPAState, nc, nc0);
-}
-
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
struct vhost_vdpa *v = &s->vhost_vdpa;
@@ -369,13 +360,12 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
if (s->always_svq ||
migration_is_setup_or_active(migrate_get_current()->state)) {
v->shadow_vqs_enabled = true;
- v->shadow_data = true;
} else {
v->shadow_vqs_enabled = false;
- v->shadow_data = false;
}

if (v->index == 0) {
+ v->shared->shadow_data = v->shadow_vqs_enabled;
vhost_vdpa_net_data_start_first(s);
return 0;
}
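
Ordering matters here: each queue pair still computes its own shadow_vqs_enabled, but only index 0 publishes the device-wide decision, and it does so before the remaining queue pairs start. A hypothetical assertion (not in the patch) makes the resulting invariant explicit:

/* Illustration only: after queue pair 0 has stored the device-wide
 * mode, every later queue pair must observe the same setting.
 */
static void vhost_vdpa_assert_data_mode(const struct vhost_vdpa *v)
{
    if (v->index != 0) {
        assert(v->shadow_vqs_enabled == v->shared->shadow_data);
    }
}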
@@ -523,7 +513,7 @@ dma_map_err:
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
- VhostVDPAState *s, *s0;
+ VhostVDPAState *s;
struct vhost_vdpa *v;
int64_t cvq_group;
int r;
@@ -534,12 +524,10 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
s = DO_UPCAST(VhostVDPAState, nc, nc);
v = &s->vhost_vdpa;
- s0 = vhost_vdpa_net_first_nc_vdpa(s);
- v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
- v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
+ v->shadow_vqs_enabled = v->shared->shadow_data;
s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
- if (s->vhost_vdpa.shadow_data) {
+ if (v->shared->shadow_data) {
/* SVQ is already configured for all virtqueues */
goto out;
}
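
This hunk is what makes vhost_vdpa_net_first_nc_vdpa() removable: CVQ start used to copy shadow_data and shadow_vqs_enabled out of queue pair 0, and now it reads the shared flag directly. Side by side, for illustration:

/* Old (removed above): two-hop pointer chase through the NIC peer */
VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
bool shadow_before = s0->vhost_vdpa.shadow_vqs_enabled;

/* New: a single dereference from any queue pair's vhost_vdpa */
bool shadow_after = v->shared->shadow_data;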
@@ -1688,12 +1676,12 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->always_svq = svq;
s->migration_state.notify = NULL;
s->vhost_vdpa.shadow_vqs_enabled = svq;
- s->vhost_vdpa.shadow_data = svq;
if (queue_pair_index == 0) {
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
s->vhost_vdpa.shared->iova_range = iova_range;
+ s->vhost_vdpa.shared->shadow_data = svq;
} else if (!is_datapath) {
s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
PROT_READ | PROT_WRITE,