--- a/exec.c
+++ b/exec.c
@@ -1579,7 +1579,7 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
             ram_list.dirty_memory[i] =
                 bitmap_zero_extend(ram_list.dirty_memory[i],
                                    old_ram_size, new_ram_size);
-        }
+        }
     }
     cpu_physical_memory_set_dirty_range(new_block->offset,
                                         new_block->used_length,
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -19,7 +19,8 @@
 #define DIRTY_MEMORY_VGA       0
 #define DIRTY_MEMORY_CODE      1
 #define DIRTY_MEMORY_MIGRATION 2
-#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */
+#define DIRTY_MEMORY_EXCLUSIVE 3
+#define DIRTY_MEMORY_NUM       4        /* num of dirty bits */
 
 #include <stdint.h>
 #include <stdbool.h>
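
An aside that is not part of the patch, but helps connect this hunk to the exec.c hunk above: memory.h derives the "all clients" mask from DIRTY_MEMORY_NUM (assuming the usual upstream definition, ((1 << DIRTY_MEMORY_NUM) - 1)), so bumping the count to 4 automatically folds the new client into ram_block_add()'s DIRTY_CLIENTS_ALL call. That is what makes "the whole memory is set to dirty" at startup, as the commit message below says. A minimal, standalone check of the mask arithmetic:

#include <stdio.h>

#define DIRTY_MEMORY_EXCLUSIVE 3
#define DIRTY_MEMORY_NUM       4
/* Assumed to match the upstream memory.h definition: */
#define DIRTY_CLIENTS_ALL      ((1 << DIRTY_MEMORY_NUM) - 1)

int main(void)
{
    /* Prints 0xf: the EXCLUSIVE client is covered alongside
     * VGA/CODE/MIGRATION, so the full-dirty call in ram_block_add()
     * seeds the new bitmap as well. */
    printf("mask = 0x%x, exclusive included: %d\n", DIRTY_CLIENTS_ALL,
           !!(DIRTY_CLIENTS_ALL & (1 << DIRTY_MEMORY_EXCLUSIVE)));
    return 0;
}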
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -21,6 +21,7 @@
 #ifndef CONFIG_USER_ONLY
 #include "hw/xen/xen.h"
+#include "sysemu/sysemu.h"
 
 struct RAMBlock {
     struct rcu_head rcu;
     struct MemoryRegion *mr;
@@ -172,6 +173,9 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
         bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
+    if (unlikely(mask & (1 << DIRTY_MEMORY_EXCLUSIVE))) {
+        bitmap_set_atomic(d[DIRTY_MEMORY_EXCLUSIVE], page, end - page);
+    }
     xen_modified_memory(start, length);
 }
 
@@ -287,5 +291,32 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
 }
 
 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
+
+/* Exclusive bitmap support. */
+#define EXCL_BITMAP_GET_OFFSET(addr) ((addr) >> TARGET_PAGE_BITS)
+
+/* Make the page of @addr not exclusive by setting its EXCL bit. */
+static inline void cpu_physical_memory_unset_excl(ram_addr_t addr)
+{
+    set_bit_atomic(EXCL_BITMAP_GET_OFFSET(addr),
+                   ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]);
+}
+
+/* Return true if the page of @addr is exclusive, i.e. its EXCL bit is clear. */
+static inline int cpu_physical_memory_is_excl(ram_addr_t addr)
+{
+    return !test_bit(EXCL_BITMAP_GET_OFFSET(addr),
+                     ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]);
+}
+
+/* Mark the page of @addr as exclusive by clearing its EXCL bit, and
+ * return the bit's previous state. */
+static inline int cpu_physical_memory_set_excl(ram_addr_t addr)
+{
+    return bitmap_test_and_clear_atomic(
+                              ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE],
+                              EXCL_BITMAP_GET_OFFSET(addr), 1);
+}
+
 #endif
 #endif
The purpose of this new bitmap is to flag the memory pages that are in the
middle of LL/SC operations (after an LL, before an SC). For all these pages,
the corresponding TLB entries will be generated in such a way as to force the
slow path for all the VCPUs (see the following patches).

When the system starts, the whole memory is set to dirty.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 exec.c                  |  2 +-
 include/exec/memory.h   |  3 ++-
 include/exec/ram_addr.h | 31 +++++++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 2 deletions(-)
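
To make the intended life cycle concrete, here is an editor's toy model, not
code from this series (the real TLB wiring arrives in the follow-up patches):
the three helpers are re-implemented over a single-word bitmap with the same
inverted polarity (bit set = page NOT exclusive), minus the atomicity of the
real bitmap ops, and main() walks an LL, a conflicting write, and the state an
SC would then observe. Everything except the three helper names is invented
for the sketch:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t ram_addr_t;                 /* stand-in for QEMU's typedef */
#define TARGET_PAGE_BITS 12                  /* 4 KiB pages, as on most targets */
#define EXCL_BITMAP_GET_OFFSET(addr) ((addr) >> TARGET_PAGE_BITS)

/* Toy stand-in for ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]: one
 * word, all bits set at "boot" (whole memory dirty, nothing exclusive). */
static unsigned long excl_bitmap = ~0UL;

/* Same contracts as the ram_addr.h helpers, without the atomic ops. */
static void cpu_physical_memory_unset_excl(ram_addr_t addr)
{
    excl_bitmap |= 1UL << EXCL_BITMAP_GET_OFFSET(addr);   /* set = not excl */
}

static int cpu_physical_memory_is_excl(ram_addr_t addr)
{
    return !(excl_bitmap & (1UL << EXCL_BITMAP_GET_OFFSET(addr)));
}

static int cpu_physical_memory_set_excl(ram_addr_t addr)
{
    unsigned long bit = 1UL << EXCL_BITMAP_GET_OFFSET(addr);
    int was_set = !!(excl_bitmap & bit);
    excl_bitmap &= ~bit;                     /* clear = exclusive */
    return was_set;
}

int main(void)
{
    ram_addr_t a = 0x3000;                   /* some address in page 3 */

    printf("boot:  is_excl = %d\n", cpu_physical_memory_is_excl(a)); /* 0 */

    cpu_physical_memory_set_excl(a);         /* LL reserves the page */
    printf("LL:    is_excl = %d\n", cpu_physical_memory_is_excl(a)); /* 1 */

    cpu_physical_memory_unset_excl(a);       /* conflicting write dirties it */
    printf("write: is_excl = %d\n", cpu_physical_memory_is_excl(a)); /* 0 */

    /* An SC here would check is_excl(), see 0, and fail. */
    return 0;
}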