@@ -287,6 +287,13 @@ static void aarch64_cpu_set_pc(CPUState *cs, vaddr value)
     }
 }
 
+static void aarch64_set_excl_range(CPUState *cpu, hwaddr addr, hwaddr size)
+{
+    cpu->excl_protected_range.begin = addr;
+    /* Cover at least 128 bits for an STXP access (two paired doublewords). */
+    cpu->excl_protected_range.end = addr + 16;
+}
+
 static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
 {
     CPUClass *cc = CPU_CLASS(oc);
@@ -297,6 +304,7 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_write_register = aarch64_cpu_gdb_write_register;
     cc->gdb_num_core_regs = 34;
     cc->gdb_core_xml_file = "aarch64-core.xml";
+    cc->cpu_set_excl_protected_range = aarch64_set_excl_range;
 }
 
 static void aarch64_cpu_register(const ARMCPUInfo *info)
In AArch64 the LDXP/STXP instructions allow exclusive accesses of up to 128 bits. However, due to a softmmu limitation, such wide accesses are not allowed. To work around this limitation, we need to support LoadLink instructions that cover at least 128 consecutive bits (see the next patch for more details).

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 target-arm/cpu64.c | 8 ++++++++
 1 file changed, 8 insertions(+)
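
For readers unfamiliar with the exclusive-range mechanism, the stand-alone sketch below (not QEMU code) illustrates why a 16-byte window is enough: a store is treated as conflicting whenever it overlaps the half-open range [begin, end), so either 8-byte half of a 128-bit STXP that softmmu emulates as two separate doubleword accesses still falls inside the window. The type and helper names (ExclRange, set_excl_range, store_hits_excl) are invented for illustration; only the begin/end semantics mirror the patch.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t hwaddr;

typedef struct {
    hwaddr begin;
    hwaddr end;                 /* first byte past the protected window */
} ExclRange;

/* Mirror of what the patch does: protect at least 16 bytes (128 bits)
 * starting at addr, enough for the widest LDXP/STXP pair. */
static void set_excl_range(ExclRange *r, hwaddr addr)
{
    r->begin = addr;
    r->end = addr + 16;
}

/* A store to [addr, addr + size) conflicts if it overlaps the window.
 * Because the window is 16 bytes wide, either doubleword of an
 * emulated 128-bit exclusive pair is caught by this check. */
static bool store_hits_excl(const ExclRange *r, hwaddr addr, hwaddr size)
{
    return addr < r->end && addr + size > r->begin;
}

The overlap test is the standard half-open interval check; nothing beyond the 16-byte width is specific to this patch.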