diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
@@ -40,6 +40,8 @@ typedef struct vhost_vdpa {
struct vhost_vdpa_iova_range iova_range;
uint64_t acked_features;
bool shadow_vqs_enabled;
+ /* Vdpa must send shadow addresses, not GPA, as IOTLB keys for data queues */
+ bool shadow_data;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
GPtrArray *shadow_vqs;
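
The new flag looks redundant at first, since every caller in this patch sets it to the same value as shadow_vqs_enabled (see the net_vhost_vdpa_init hunk below). The comment gives the intended distinction: shadow_vqs_enabled says whether this device's virtqueues are shadowed, while shadow_data says whether the memory listener must use shadow addresses, rather than guest physical addresses, as IOTLB keys for the data queues. The two can plausibly diverge once only the control virtqueue is shadowed in its own address space. Below is a minimal standalone sketch of that situation; it is illustrative C written under that assumption, not QEMU code, and struct toy_flags is hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_flags {
        bool shadow_vqs_enabled; /* this device's vqs are shadowed */
        bool shadow_data;        /* data IOTLB keys are shadow IOVA, not GPA */
    };

    int main(void)
    {
        /* x-svq=on: everything is shadowed, so both flags are set */
        struct toy_flags all_svq  = { .shadow_vqs_enabled = true,
                                      .shadow_data = true };
        /* CVQ shadowed in its own address space: data queues keep GPA */
        struct toy_flags cvq_only = { .shadow_vqs_enabled = true,
                                      .shadow_data = false };

        printf("all_svq:  data IOTLB key = %s\n",
               all_svq.shadow_data ? "shadow IOVA" : "GPA");
        printf("cvq_only: data IOTLB key = %s\n",
               cvq_only.shadow_data ? "shadow IOVA" : "GPA");
        return 0;
    }
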
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
@@ -224,7 +224,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
vaddr, section->readonly);
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_vqs_enabled) {
+ if (v->shadow_data) {
int r;
mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
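
With shadow_data set, vhost_vdpa_listener_region_add() stops treating the guest physical address as the IOTLB key: the truncated context above fills a DMAMap with the region's host vaddr (the trailing comma on the last context line is the comma operator in the original source, not a typo), and the code that follows, not shown here, allocates a free shadow IOVA from v->iova_tree via vhost_iova_tree_map_alloc() and hands that IOVA, rather than the GPA, to the device's DMA map call.
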
@@ -251,7 +251,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
return;
fail_map:
- if (v->shadow_vqs_enabled) {
+ if (v->shadow_data) {
vhost_iova_tree_remove(v->iova_tree, mem_region);
}
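
The error path has to stay symmetric with the map path: if the device rejects the DMA mapping, the entry just allocated in the IOVA tree is removed again so the tree cannot drift out of sync with the device's IOTLB. The condition switches to shadow_data for the same reason as in the hunk above.
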
@@ -296,7 +296,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_vqs_enabled) {
+ if (v->shadow_data) {
const DMAMap *result;
const void *vaddr = memory_region_get_ram_ptr(section->mr) +
section->offset_within_region +
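
Unmapping is the inverse problem: the listener knows the region's host vaddr and GPA, but with shadow_data the key the device holds is a shadow IOVA, so it must be looked up in the IOVA tree before the unmap; that is what the const DMAMap *result in the context above is for (QEMU's vhost_iova_tree_find_iova() plays this role). Here is a toy, self-contained version of that reverse lookup, with a linear scan standing in for the real tree and all names hypothetical:

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct {
        uint64_t iova;  /* shadow address the device was given */
        uint64_t vaddr; /* host virtual address backing it */
        uint64_t size;
    } ToyMap;

    /* Given a host vaddr, recover the shadow IOVA so the right IOTLB
     * entry can be removed. */
    static const ToyMap *toy_find_by_vaddr(const ToyMap *m, size_t n,
                                           uint64_t va)
    {
        for (size_t i = 0; i < n; i++) {
            if (va >= m[i].vaddr && va - m[i].vaddr < m[i].size) {
                return &m[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        ToyMap maps[] = { { 0x1000, 0x7f0000000000ULL, 0x10000 } };
        const ToyMap *r = toy_find_by_vaddr(maps, 1, 0x7f0000004000ULL);

        printf("unmap IOTLB key 0x%" PRIx64 "\n", r ? r->iova : 0);
        return 0;
    }
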
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
@@ -587,6 +587,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.index = queue_pair_index;
s->always_svq = svq;
s->vhost_vdpa.shadow_vqs_enabled = svq;
+ s->vhost_vdpa.shadow_data = svq;
s->vhost_vdpa.iova_tree = iova_tree;
if (!is_datapath) {
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
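
Note that at this stage shadow_data is initialized to the same svq value as shadow_vqs_enabled (both come from the x-svq option), so this patch changes no behavior on its own; the flag only becomes meaningful once a later change sets the two independently, presumably for the shadowed-CVQ case.
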