@@ -475,6 +475,27 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
return qemu_ram_addr_from_host_nofail(p);
}
+/* Keep a circular array with the last excl_history.length addresses used for
+ * exclusive accesses. Addresses evicted from the array are marked as non-exclusive. */
+extern CPUExclusiveHistory excl_history;
+static inline void excl_history_put_addr(hwaddr addr)
+{
+ hwaddr last;
+
+ /* Calculate the index of the next exclusive address */
+ excl_history.last_idx = (excl_history.last_idx + 1) % excl_history.length;
+
+ last = excl_history.c_array[excl_history.last_idx];
+
+ /* Unset EXCL bit of the oldest entry */
+ if (last != EXCLUSIVE_RESET_ADDR) {
+ cpu_physical_memory_unset_excl(last);
+ }
+
+ /* Add a new address, overwriting the oldest one */
+ excl_history.c_array[excl_history.last_idx] = addr & TARGET_PAGE_MASK;
+}
+
#define MMUSUFFIX _mmu
/* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
@@ -177,6 +177,25 @@ struct CPUAddressSpace {
MemoryListener tcg_as_listener;
};
+/* Exclusive memory support */
+CPUExclusiveHistory excl_history;
+void cpu_exclusive_history_init(void)
+{
+ /* Initialize exclusive history for atomic instruction handling. */
+ if (tcg_enabled()) {
+ g_assert(EXCLUSIVE_HISTORY_CPU_LEN * max_cpus <= UINT16_MAX);
+ excl_history.length = EXCLUSIVE_HISTORY_CPU_LEN * max_cpus;
+ excl_history.c_array = g_malloc(excl_history.length * sizeof(hwaddr));
+ memset(excl_history.c_array, -1, excl_history.length * sizeof(hwaddr));
+ }
+}
+
+void cpu_exclusive_history_free(void)
+{
+ if (tcg_enabled()) {
+ g_free(excl_history.c_array);
+ }
+}
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -232,7 +232,15 @@ struct kvm_run;
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/* Atomic insn translation TLB support. */
+typedef struct CPUExclusiveHistory {
+ uint16_t last_idx; /* index of last insertion */
+ uint16_t length; /* history's length, it depends on max_cpus */
+ hwaddr *c_array; /* history's circular array */
+} CPUExclusiveHistory;
#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+#define EXCLUSIVE_HISTORY_CPU_LEN 256
+void cpu_exclusive_history_init(void);
+void cpu_exclusive_history_free(void);
/**
* CPUState:
@@ -77,6 +77,7 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
CPUState *cpu;
cpu_physical_memory_set_excl(hw_addr);
+ excl_history_put_addr(hw_addr);
CPU_FOREACH(cpu) {
if (this_cpu != cpu) {
tlb_flush(cpu, 1);
@@ -547,6 +547,7 @@ static void res_free(void)
{
g_free(boot_splash_filedata);
boot_splash_filedata = NULL;
+ cpu_exclusive_history_free();
}
static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp)
@@ -4322,6 +4323,8 @@ int main(int argc, char **argv, char **envp)
configure_accelerator(current_machine);
+ cpu_exclusive_history_init();
+
if (qtest_chrdev) {
qtest_init(qtest_chrdev, qtest_log, &error_fatal);
}
Add a circular buffer to store the hw addresses used in the last
EXCLUSIVE_HISTORY_LEN exclusive accesses. When an address is popped
from the buffer, its page will be set as not exclusive. In this way we
avoid frequent set/unset of a page (causing frequent flushes as well).

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c                | 21 +++++++++++++++++++++
 exec.c                  | 19 +++++++++++++++++++
 include/qom/cpu.h       |  8 ++++++++
 softmmu_llsc_template.h |  1 +
 vl.c                    |  3 +++
 5 files changed, 52 insertions(+)
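For reference, here is a minimal standalone sketch of the circular-history
eviction idea described above. It is an illustration only: History,
history_init, history_put, unset_excl and RESET_ADDR are hypothetical
stand-ins for the QEMU names used in the patch, not the patch's code.

/* Minimal, self-contained sketch of the circular-history eviction idea
 * (illustration only; History, history_init, history_put, unset_excl and
 * RESET_ADDR are hypothetical stand-ins for the QEMU names above). */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define RESET_ADDR UINT64_MAX        /* plays the role of EXCLUSIVE_RESET_ADDR */

typedef struct {
    uint16_t last_idx;               /* index of the most recent insertion */
    uint16_t length;                 /* number of slots in the circular array */
    uint64_t *c_array;               /* the circular array itself */
} History;

static void history_init(History *h, uint16_t len)
{
    h->last_idx = 0;
    h->length = len;
    h->c_array = malloc(len * sizeof(uint64_t));
    memset(h->c_array, -1, len * sizeof(uint64_t)); /* every slot starts at RESET_ADDR */
}

/* Record @addr; the slot being overwritten holds the oldest address, whose
 * page would lose its EXCL bit here (unset_excl is the caller's callback). */
static void history_put(History *h, uint64_t addr, void (*unset_excl)(uint64_t))
{
    h->last_idx = (h->last_idx + 1) % h->length;
    if (h->c_array[h->last_idx] != RESET_ADDR) {
        unset_excl(h->c_array[h->last_idx]);
    }
    h->c_array[h->last_idx] = addr;
}

With a buffer of N slots, a page's exclusive bit is cleared only once its
address has been pushed out by N newer exclusive accesses, which is what keeps
the set/unset rate (and the resulting TLB flushes) low.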