diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -31,8 +31,8 @@ vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"

# vhost-vdpa.c
vhost_vdpa_skipped_memory_section(int is_ram, int is_iommu, int is_protected, int is_ram_device, uint64_t first, uint64_t last, int page_mask) "is_ram=%d, is_iommu=%d, is_protected=%d, is_ram_device=%d iova_min=0x%"PRIx64" iova_last=0x%"PRIx64" page_mask=0x%x"
-vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
-vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
+vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type, bool shadow, const char *override) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8" shadow: %d %s"
+vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type, bool shadow, const char *override) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8" shadow: %d %s"
vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -22,6 +22,8 @@
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
+#include "migration/cpr.h"
+#include "migration/options.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "trace.h"
@@ -97,18 +99,29 @@ int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
struct vhost_msg_v2 msg = {};
int fd = s->device_fd;
int ret = 0;
+ bool remap = false, suppress = false;
+
+ if (migrate_mode() == MIG_MODE_CPR_EXEC && !shadow) {
+ remap = !!(s->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_REMAP));
+ suppress = !remap;
+ }

msg.type = VHOST_IOTLB_MSG_V2;
msg.asid = asid;
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
- msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
- msg.iotlb.type = VHOST_IOTLB_UPDATE;
+ msg.iotlb.perm = remap ? 0 : readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
+ msg.iotlb.type = remap ? VHOST_IOTLB_REMAP : VHOST_IOTLB_UPDATE;

trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
- msg.iotlb.type);
+ msg.iotlb.type, shadow,
+ remap ? "(remap)" : suppress ? "(suppress)" : "");
+
+ if (suppress) {
+ return 0;
+ }

if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -129,6 +142,7 @@ int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
struct vhost_msg_v2 msg = {};
int fd = s->device_fd;
int ret = 0;
+ bool suppress = migrate_mode() == MIG_MODE_CPR_EXEC && !shadow;

msg.type = VHOST_IOTLB_MSG_V2;
msg.asid = asid;
@@ -137,7 +151,12 @@ int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
- msg.iotlb.size, msg.iotlb.type);
+ msg.iotlb.size, msg.iotlb.type, shadow,
+ suppress ? "(suppressed)" : "");
+
+ if (suppress) {
+ return 0;
+ }

if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -851,7 +870,8 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
0x1ULL << VHOST_BACKEND_F_SUSPEND |
- 0x1ULL << VHOST_BACKEND_F_NEW_OWNER;
+ 0x1ULL << VHOST_BACKEND_F_NEW_OWNER |
+ 0x1ULL << VHOST_BACKEND_F_IOTLB_REMAP;
int r;

if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py
--- a/scripts/tracetool/__init__.py
+++ b/scripts/tracetool/__init__.py
@@ -253,7 +253,7 @@ def __init__(self, name, props, fmt, args, lineno, filename, orig=None,
self.event_trans = event_trans
self.event_exec = event_exec

- if len(args) > 10:
+ if len(args) > 11:
raise ValueError("Event '%s' has more than maximum permitted "
"argument count" % name)

Preserve dma mappings during CPR restart by suppressing dma_map and
dma_unmap calls.  For devices with capability VHOST_BACKEND_F_IOTLB_REMAP,
convert dma_map calls to VHOST_IOTLB_REMAP to set the new userland VA for
the existing mapping.  However, map and unmap shadow vq buffers normally.
Their pages are not locked in memory, and they are re-created after CPR.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 hw/virtio/trace-events        |  4 ++--
 hw/virtio/vhost-vdpa.c        | 30 +++++++++++++++++++++++++-----
 scripts/tracetool/__init__.py |  2 +-
 3 files changed, 28 insertions(+), 8 deletions(-)
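For review purposes, here is a minimal standalone sketch of the per-call
decision described above.  It is distilled from the diff, not taken from it:
the helper name cpr_map_action, the MapAction enum, and the boolean
parameters are hypothetical illustrations, not QEMU APIs.

#include <stdbool.h>

/* Hypothetical illustration -- not QEMU code. */
typedef enum {
    MAP_UPDATE,   /* send VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE as before */
    MAP_REMAP,    /* send VHOST_IOTLB_REMAP to rebind the mapping to the new VA */
    MAP_SUPPRESS, /* send nothing; the kernel keeps the existing mapping */
} MapAction;

static MapAction cpr_map_action(bool cpr_exec, bool shadow, bool has_remap_cap)
{
    /* Shadow vq buffers are not locked in memory and are re-created
     * after CPR, so they are always mapped and unmapped normally. */
    if (!cpr_exec || shadow) {
        return MAP_UPDATE;
    }
    /* Pinned guest mappings survive the exec: rebind their userland VA
     * if the backend advertises VHOST_BACKEND_F_IOTLB_REMAP, otherwise
     * skip the message entirely. */
    return has_remap_cap ? MAP_REMAP : MAP_SUPPRESS;
}

Note that unmap has no remap variant: in vhost_vdpa_dma_unmap above, a
non-shadow unmap during CPR exec is simply suppressed, since the mapping
must outlive the restart.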