@@ -3113,6 +3113,8 @@ F: include/hw/i386/remote-memory.h
F: hw/i386/remote-memory.c
F: hw/pci/proxy.c
F: include/hw/pci/proxy.h
+F: hw/pci/memory-sync.c
+F: include/hw/pci/memory-sync.h
Build and test automation
-------------------------
@@ -17,6 +17,7 @@
#include "sysemu/runstate.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
+#include "hw/i386/remote-memory.h"
static void process_config_write(QIOChannel *ioc, PCIDevice *dev,
MPQemuMsg *msg);
@@ -64,6 +65,10 @@ void coroutine_fn mpqemu_remote_msg_loop_co(void *data)
case BAR_READ:
process_bar_read(com->ioc, &msg, &local_err);
break;
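+    /*
+     * Remap the remote process's view of system memory from the
+     * regions and fds shared by the proxy device.
+     */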
+ case SYNC_SYSMEM:
+ remote_sysmem_reconfig(&msg, &local_err);
+ break;
+
default:
error_setg(&local_err,
"Unknown command (%d) received for device %s (pid=%d)",
new file mode 100644
@@ -0,0 +1,210 @@
+/*
+ * Copyright © 2018, 2020 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+
+#include "qemu/compiler.h"
+#include "qemu/int128.h"
+#include "qemu/range.h"
+#include "exec/memory.h"
+#include "exec/cpu-common.h"
+#include "cpu.h"
+#include "exec/ram_addr.h"
+#include "exec/address-spaces.h"
+#include "io/mpqemu-link.h"
+#include "hw/pci/memory-sync.h"
+
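+/*
+ * Called at the start of a memory transaction: drop the references taken
+ * on the previously cached sections and reset the cache so it can be
+ * rebuilt by the region_add/region_nop callbacks.
+ */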
+static void proxy_ml_begin(MemoryListener *listener)
+{
+ RemoteMemSync *sync = container_of(listener, RemoteMemSync, listener);
+ int mrs;
+
+ for (mrs = 0; mrs < sync->n_mr_sections; mrs++) {
+ memory_region_unref(sync->mr_sections[mrs].mr);
+ }
+
+ g_free(sync->mr_sections);
+ sync->mr_sections = NULL;
+ sync->n_mr_sections = 0;
+}
+
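+/*
+ * Map a host address back to its MemoryRegion and return the file
+ * descriptor backing that region (-1 if it is not fd-backed), along with
+ * the offset of the address within the region's RAM block.
+ */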
+static int get_fd_from_hostaddr(uint64_t host, ram_addr_t *offset)
+{
+ MemoryRegion *mr;
+ ram_addr_t off;
+
+    /*
+     * Assume the host address is valid, since it comes from the
+     * MemoryListener system. If it were not valid, the call below
+     * would return the default subregion of the "system_memory"
+     * region rather than NULL, so a NULL check is not possible here.
+     */
+ mr = memory_region_from_host((void *)(uintptr_t)host, &off);
+
+ if (offset) {
+ *offset = off;
+ }
+
+ return memory_region_get_fd(mr);
+}
+
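+/*
+ * Two sections can be merged only if they are backed by the same file
+ * descriptor and are contiguous in host memory.
+ */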
+static bool proxy_mrs_can_merge(uint64_t host, uint64_t prev_host, size_t size)
+{
+ bool merge;
+ int fd1, fd2;
+
+ fd1 = get_fd_from_hostaddr(host, NULL);
+
+ fd2 = get_fd_from_hostaddr(prev_host, NULL);
+
+ merge = (fd1 == fd2);
+
+ merge &= ((prev_host + size) == host);
+
+ return merge;
+}
+
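+/*
+ * Try to extend the most recently cached section with the incoming one.
+ * Sections that are adjacent or overlapping in guest physical address
+ * space, belong to the same MemoryRegion and are contiguous in host
+ * memory are collapsed into a single entry. Returns true when the
+ * incoming section must not be appended to the cache.
+ */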
+static bool try_merge(RemoteMemSync *sync, MemoryRegionSection *section)
+{
+ uint64_t mrs_size, mrs_gpa, mrs_page;
+ MemoryRegionSection *prev_sec;
+ bool merged = false;
+ uintptr_t mrs_host;
+ RAMBlock *mrs_rb;
+
+ if (!sync->n_mr_sections) {
+ return false;
+ }
+
+ mrs_rb = section->mr->ram_block;
+ mrs_page = (uint64_t)qemu_ram_pagesize(mrs_rb);
+ mrs_size = int128_get64(section->size);
+ mrs_gpa = section->offset_within_address_space;
+ mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region;
+
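+    /* A section without a backing fd cannot be shared; skip it. */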
+ if (get_fd_from_hostaddr(mrs_host, NULL) < 0) {
+ return true;
+ }
+
+ mrs_host = mrs_host & ~(mrs_page - 1);
+ mrs_gpa = mrs_gpa & ~(mrs_page - 1);
+ mrs_size = ROUND_UP(mrs_size, mrs_page);
+
+ prev_sec = sync->mr_sections + (sync->n_mr_sections - 1);
+ uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
+ uint64_t prev_size = int128_get64(prev_sec->size);
+ uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
+ uint64_t prev_host_start =
+ (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
+ prev_sec->offset_within_region;
+ uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);
+
+ if (mrs_gpa <= (prev_gpa_end + 1)) {
+ g_assert(mrs_gpa > prev_gpa_start);
+
+ if ((section->mr == prev_sec->mr) &&
+ proxy_mrs_can_merge(mrs_host, prev_host_start,
+ (mrs_gpa - prev_gpa_start))) {
+ uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
+ merged = true;
+ prev_sec->offset_within_address_space =
+ MIN(prev_gpa_start, mrs_gpa);
+ prev_sec->offset_within_region =
+ MIN(prev_host_start, mrs_host) -
+ (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
+ prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
+ mrs_host));
+ }
+ }
+
+ return merged;
+}
+
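+/*
+ * region_add/region_nop callback: cache RAM (non-ROM) sections, merging
+ * with the previous entry when possible, and take a reference on the
+ * MemoryRegion so it stays valid until the next transaction begins.
+ */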
+static void proxy_ml_region_addnop(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ RemoteMemSync *sync = container_of(listener, RemoteMemSync, listener);
+
+    if (!memory_region_is_ram(section->mr) ||
+        memory_region_is_rom(section->mr)) {
+        return;
+    }
+
+ if (try_merge(sync, section)) {
+ return;
+ }
+
+ ++sync->n_mr_sections;
+ sync->mr_sections = g_renew(MemoryRegionSection, sync->mr_sections,
+ sync->n_mr_sections);
+ sync->mr_sections[sync->n_mr_sections - 1] = *section;
+ sync->mr_sections[sync->n_mr_sections - 1].fv = NULL;
+ memory_region_ref(section->mr);
+}
+
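+/*
+ * Called when the memory transaction is committed: pack the cached
+ * sections into a single SYNC_SYSMEM message, one fd plus a
+ * GPA/size/offset triple per section, and send it to the remote process.
+ */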
+static void proxy_ml_commit(MemoryListener *listener)
+{
+ RemoteMemSync *sync = container_of(listener, RemoteMemSync, listener);
+ MPQemuMsg msg;
+ MemoryRegionSection *section;
+ ram_addr_t offset;
+ uintptr_t host_addr;
+ int region;
+ Error *local_err = NULL;
+
+ memset(&msg, 0, sizeof(MPQemuMsg));
+
+ msg.cmd = SYNC_SYSMEM;
+ msg.num_fds = sync->n_mr_sections;
+ msg.size = sizeof(SyncSysmemMsg);
+    if (msg.num_fds > REMOTE_MAX_FDS) {
+        error_report("Number of memory sections exceeds the fd limit of %d",
+                     REMOTE_MAX_FDS);
+        return;
+    }
+
+ for (region = 0; region < sync->n_mr_sections; region++) {
+ section = &sync->mr_sections[region];
+ msg.data.sync_sysmem.gpas[region] =
+ section->offset_within_address_space;
+ msg.data.sync_sysmem.sizes[region] = int128_get64(section->size);
+ host_addr = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region;
+ msg.fds[region] = get_fd_from_hostaddr(host_addr, &offset);
+ msg.data.sync_sysmem.offsets[region] = offset;
+ }
+    mpqemu_msg_send(&msg, sync->ioc, &local_err);
+    if (local_err) {
+        error_reportf_err(local_err, "Failed to send SYNC_SYSMEM message: ");
+    }
+}
+
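+/*
+ * Unregister the listener and release the cached sections;
+ * proxy_ml_begin() already does that cleanup, so reuse it here.
+ */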
+void deconfigure_memory_sync(RemoteMemSync *sync)
+{
+ memory_listener_unregister(&sync->listener);
+
+ proxy_ml_begin(&sync->listener);
+}
+
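+/*
+ * Register a MemoryListener on the system address space so that the
+ * proxy forwards its RAM regions to the remote process over the given
+ * channel whenever the memory map changes.
+ */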
+void configure_memory_sync(RemoteMemSync *sync, QIOChannel *ioc)
+{
+ sync->n_mr_sections = 0;
+ sync->mr_sections = NULL;
+
+ sync->ioc = ioc;
+
+ sync->listener.begin = proxy_ml_begin;
+ sync->listener.commit = proxy_ml_commit;
+ sync->listener.region_add = proxy_ml_region_addnop;
+ sync->listener.region_nop = proxy_ml_region_addnop;
+ sync->listener.priority = 10;
+
+ memory_listener_register(&sync->listener, &address_space_memory);
+}
@@ -18,3 +18,5 @@ softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss)
softmmu_ss.add(when: 'CONFIG_PCI', if_false: files('pci-stub.c'))
softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('pci-stub.c'))
+
+specific_ss.add(when: 'CONFIG_MPQEMU', if_true: files('memory-sync.c'))
@@ -18,6 +18,8 @@
#include "migration/blocker.h"
#include "io/mpqemu-link.h"
#include "qemu/error-report.h"
+#include "hw/pci/memory-sync.h"
+#include "qom/object.h"
static void proxy_set_socket(PCIProxyDev *pdev, int fd, Error **errp)
{
@@ -58,6 +60,8 @@ static void pci_proxy_dev_realize(PCIDevice *device, Error **errp)
qemu_mutex_init(&dev->io_mutex);
qio_channel_set_blocking(dev->ioc, true, NULL);
+
+ configure_memory_sync(&dev->sync, dev->ioc);
}
static void pci_proxy_dev_exit(PCIDevice *pdev)
@@ -69,6 +73,8 @@ static void pci_proxy_dev_exit(PCIDevice *pdev)
migrate_del_blocker(dev->migration_blocker);
error_free(dev->migration_blocker);
+
+ deconfigure_memory_sync(&dev->sync);
}
static int config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val,
new file mode 100644
@@ -0,0 +1,27 @@
+/*
+ * Copyright © 2018, 2020 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_SYNC_H
+#define MEMORY_SYNC_H
+
+#include "exec/memory.h"
+#include "io/channel.h"
+
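+/*
+ * RemoteMemSync registers a MemoryListener on behalf of a proxy device
+ * and caches the RAM sections of the system address space so they can
+ * be forwarded to the remote process over the QIOChannel.
+ */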
+typedef struct RemoteMemSync {
+ MemoryListener listener;
+
+ int n_mr_sections;
+ MemoryRegionSection *mr_sections;
+
+ QIOChannel *ioc;
+} RemoteMemSync;
+
+void configure_memory_sync(RemoteMemSync *sync, QIOChannel *ioc);
+void deconfigure_memory_sync(RemoteMemSync *sync);
+
+#endif /* MEMORY_SYNC_H */
@@ -11,6 +11,7 @@
#include "hw/pci/pci.h"
#include "io/channel.h"
+#include "hw/pci/memory-sync.h"
#define TYPE_PCI_PROXY_DEV "x-pci-proxy-dev"
@@ -40,6 +41,7 @@ struct PCIProxyDev {
QemuMutex io_mutex;
QIOChannel *ioc;
Error *migration_blocker;
+ RemoteMemSync sync;
ProxyMemoryRegion region[PCI_NUM_REGIONS];
};