@@ -72,6 +72,7 @@ void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr);
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
const char *block_name);
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb);
+bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start);
 
/* ram cache */
int colo_init_ram_cache(void);
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -671,6 +671,35 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
return ret;
}
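 
+/*
+ * Request a page from the source, or place a zeropage directly when the page
+ * was discarded via a RamDiscardManager and was therefore never migrated.
+ */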
+static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
+ ram_addr_t start, uint64_t haddr)
+{
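+    /* Host page (as per the RAMBlock's page size) containing haddr */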
+ void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
+
+    /*
+     * Discarded pages (via RamDiscardManager) are never migrated. In the
+     * unlikely case such a page gets accessed, place a zeropage, which also
+     * sets the relevant recv_bitmap bits so we won't place a zeropage twice.
+     *
+     * Checking a single bit suffices even when the host page size exceeds
+     * the target page size (TPS): all relevant bits are either set or clear.
+     */
+ assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
+ if (ramblock_page_is_discarded(rb, start)) {
+ bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);
+
+ return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
+ }
+
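+    /* Not discarded: request the page from the source via the return path. */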
+ return migrate_send_rp_req_pages(mis, rb, start, haddr);
+}
+
/*
* Callback from shared fault handlers to ask for a page,
* the page must be specified by a RAMBlock and an offset in that rb
@@ -690,7 +719,7 @@ int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
qemu_ram_get_idstr(rb), rb_offset);
return postcopy_wake_shared(pcfd, client_addr, rb);
}
- migrate_send_rp_req_pages(mis, rb, aligned_rbo, client_addr);
+ postcopy_request_page(mis, rb, aligned_rbo, client_addr);
return 0;
}
 
@@ -984,8 +1013,8 @@ retry:
* Send the request to the source - we want to request one
* of our host page sizes (which is >= TPS)
*/
- ret = migrate_send_rp_req_pages(mis, rb, rb_offset,
- msg.arg.pagefault.address);
+ ret = postcopy_request_page(mis, rb, rb_offset,
+ msg.arg.pagefault.address);
if (ret) {
/* May be network failure, try to wait for recovery */
if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
@@ -993,7 +1022,7 @@ retry:
goto retry;
} else {
                 /* This is an unavoidable fault */
- error_report("%s: migrate_send_rp_req_pages() get %d",
+                error_report("%s: postcopy_request_page() failed: %d",
__func__, ret);
break;
}
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -912,6 +912,29 @@ static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
return cleared_bits;
}
 
+/*
+ * Check whether a host-page-aligned page falls into a range discarded by the
+ * RamDiscardManager responsible for the mapped memory region of the RAMBlock.
+ *
+ * Note: The result is only stable while migrating (precopy/postcopy).
+ */
+bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
+{
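+    /* A RamDiscardManager, e.g., virtio-mem, tracks which parts are populated. */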
+ if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
+ MemoryRegionSection section = {
+ .mr = rb->mr,
+ .offset_within_region = start,
+ .size = int128_make64(qemu_ram_pagesize(rb)),
+ };
+
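+        /* Discarded unless the whole host page is populated. */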
+        return !ram_discard_manager_is_populated(rdm, &section);
+ }
+ return false;
+}
+
/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{