| Message ID | 1454059965-23402-7-git-send-email-a.rigo@virtualopensystems.com (mailing list archive) |
|---|---|
| State | New, archived |
Alvise Rigo <a.rigo@virtualopensystems.com> writes:

> The excl_protected_range is a hwaddr range set by the VCPU at the
> execution of a LoadLink instruction. If a normal access writes to this
> range, the corresponding StoreCond will fail.
>
> Each architecture can set the exclusive range when issuing the LoadLink
> operation through a CPUClass hook. This comes in handy to emulate, for
> instance, the exclusive monitor implemented in some ARM architectures
> (more precisely, the Exclusive Reservation Granule).
>
> In addition, add another CPUClass hook called to decide whether a
> StoreCond has to fail or not.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
>  include/qom/cpu.h | 15 +++++++++++++++
>  qom/cpu.c         | 20 ++++++++++++++++++++
>  2 files changed, 35 insertions(+)
>
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 2e5229d..682c81d 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -29,6 +29,7 @@
>  #include "qemu/queue.h"
>  #include "qemu/thread.h"
>  #include "qemu/typedefs.h"
> +#include "qemu/range.h"
>
>  typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
>                                       void *opaque);
> @@ -183,6 +184,12 @@ typedef struct CPUClass {
>      void (*cpu_exec_exit)(CPUState *cpu);
>      bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
>
> +    /* Atomic instruction handling */
> +    void (*cpu_set_excl_protected_range)(CPUState *cpu, hwaddr addr,
> +                                         hwaddr size);
> +    int (*cpu_valid_excl_access)(CPUState *cpu, hwaddr addr,
> +                                 hwaddr size);
> +
>      void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
>  } CPUClass;
>
> @@ -219,6 +226,9 @@ struct kvm_run;
>  #define TB_JMP_CACHE_BITS 12
>  #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
>
> +/* Atomic insn translation TLB support. */
> +#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
> +
>  /**
>   * CPUState:
>   * @cpu_index: CPU index (informative).
> @@ -341,6 +351,11 @@ struct CPUState {
>       */
>      bool throttle_thread_scheduled;
>
> +    /* vCPU's exclusive addresses range.
> +     * The address is set to EXCLUSIVE_RESET_ADDR if the vCPU is not
> +     * in the middle of a LL/SC. */
> +    struct Range excl_protected_range;
> +

In which case we should probably initialise that on CPU creation as we
don't start in the middle of a LL/SC.

>      /* Note that this is accessed at the start of every TB via a negative
>         offset from AREG0. Leave this field at the end so as to make the
>         (absolute value) offset as small as possible. This reduces code
> diff --git a/qom/cpu.c b/qom/cpu.c
> index 8f537a4..a5d360c 100644
> --- a/qom/cpu.c
> +++ b/qom/cpu.c
> @@ -203,6 +203,24 @@ static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
>      return false;
>  }
>
> +static void cpu_common_set_excl_range(CPUState *cpu, hwaddr addr, hwaddr size)
> +{
> +    cpu->excl_protected_range.begin = addr;
> +    cpu->excl_protected_range.end = addr + size;
> +}
> +
> +static int cpu_common_valid_excl_access(CPUState *cpu, hwaddr addr, hwaddr size)
> +{
> +    /* Check if the excl range completely covers the access */
> +    if (cpu->excl_protected_range.begin <= addr &&
> +        cpu->excl_protected_range.end >= addr + size) {
> +
> +        return 1;
> +    }
> +
> +    return 0;
> +}

This can be a bool function.

> +
>  void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
>                      int flags)
>  {
> @@ -355,6 +373,8 @@ static void cpu_class_init(ObjectClass *klass, void *data)
>      k->cpu_exec_enter = cpu_common_noop;
>      k->cpu_exec_exit = cpu_common_noop;
>      k->cpu_exec_interrupt = cpu_common_exec_interrupt;
> +    k->cpu_set_excl_protected_range = cpu_common_set_excl_range;
> +    k->cpu_valid_excl_access = cpu_common_valid_excl_access;
>      dc->realize = cpu_common_realizefn;
>      /*
>       * Reason: CPUs still need special care by board code: wiring up

--
Alex Bennée
On Thu, Feb 11, 2016 at 2:22 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>
> Alvise Rigo <a.rigo@virtualopensystems.com> writes:
>
>> The excl_protected_range is a hwaddr range set by the VCPU at the
>> execution of a LoadLink instruction. If a normal access writes to this
>> range, the corresponding StoreCond will fail.
>>
>> Each architecture can set the exclusive range when issuing the LoadLink
>> operation through a CPUClass hook. This comes in handy to emulate, for
>> instance, the exclusive monitor implemented in some ARM architectures
>> (more precisely, the Exclusive Reservation Granule).
>>
>> In addition, add another CPUClass hook called to decide whether a
>> StoreCond has to fail or not.
>>
>> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
>> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
>> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
>> ---
>>  include/qom/cpu.h | 15 +++++++++++++++
>>  qom/cpu.c         | 20 ++++++++++++++++++++
>>  2 files changed, 35 insertions(+)
>>
>> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
>> index 2e5229d..682c81d 100644
>> --- a/include/qom/cpu.h
>> +++ b/include/qom/cpu.h
>> @@ -29,6 +29,7 @@
>>  #include "qemu/queue.h"
>>  #include "qemu/thread.h"
>>  #include "qemu/typedefs.h"
>> +#include "qemu/range.h"
>>
>>  typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
>>                                       void *opaque);
>> @@ -183,6 +184,12 @@ typedef struct CPUClass {
>>      void (*cpu_exec_exit)(CPUState *cpu);
>>      bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
>>
>> +    /* Atomic instruction handling */
>> +    void (*cpu_set_excl_protected_range)(CPUState *cpu, hwaddr addr,
>> +                                         hwaddr size);
>> +    int (*cpu_valid_excl_access)(CPUState *cpu, hwaddr addr,
>> +                                 hwaddr size);
>> +
>>      void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
>>  } CPUClass;
>>
>> @@ -219,6 +226,9 @@ struct kvm_run;
>>  #define TB_JMP_CACHE_BITS 12
>>  #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
>>
>> +/* Atomic insn translation TLB support. */
>> +#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
>> +
>>  /**
>>   * CPUState:
>>   * @cpu_index: CPU index (informative).
>> @@ -341,6 +351,11 @@ struct CPUState {
>>       */
>>      bool throttle_thread_scheduled;
>>
>> +    /* vCPU's exclusive addresses range.
>> +     * The address is set to EXCLUSIVE_RESET_ADDR if the vCPU is not
>> +     * in the middle of a LL/SC. */
>> +    struct Range excl_protected_range;
>> +
>
> In which case we should probably initialise that on CPU creation as we
> don't start in the middle of a LL/SC.

Agreed.

>
>>      /* Note that this is accessed at the start of every TB via a negative
>>         offset from AREG0. Leave this field at the end so as to make the
>>         (absolute value) offset as small as possible. This reduces code
>> diff --git a/qom/cpu.c b/qom/cpu.c
>> index 8f537a4..a5d360c 100644
>> --- a/qom/cpu.c
>> +++ b/qom/cpu.c
>> @@ -203,6 +203,24 @@ static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
>>      return false;
>>  }
>>
>> +static void cpu_common_set_excl_range(CPUState *cpu, hwaddr addr, hwaddr size)
>> +{
>> +    cpu->excl_protected_range.begin = addr;
>> +    cpu->excl_protected_range.end = addr + size;
>> +}
>> +
>> +static int cpu_common_valid_excl_access(CPUState *cpu, hwaddr addr, hwaddr size)
>> +{
>> +    /* Check if the excl range completely covers the access */
>> +    if (cpu->excl_protected_range.begin <= addr &&
>> +        cpu->excl_protected_range.end >= addr + size) {
>> +
>> +        return 1;
>> +    }
>> +
>> +    return 0;
>> +}
>
> This can be a bool function.

OK.

Thank you,
alvise

>
>> +
>>  void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
>>                      int flags)
>>  {
>> @@ -355,6 +373,8 @@ static void cpu_class_init(ObjectClass *klass, void *data)
>>      k->cpu_exec_enter = cpu_common_noop;
>>      k->cpu_exec_exit = cpu_common_noop;
>>      k->cpu_exec_interrupt = cpu_common_exec_interrupt;
>> +    k->cpu_set_excl_protected_range = cpu_common_set_excl_range;
>> +    k->cpu_valid_excl_access = cpu_common_valid_excl_access;
>>      dc->realize = cpu_common_realizefn;
>>      /*
>>       * Reason: CPUs still need special care by board code: wiring up
>
>
> --
> Alex Bennée
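For reference, a minimal sketch of the two follow-ups agreed above: initialising the range at CPU reset and turning the access check into a bool function. It assumes the CPUClass prototype is updated to return bool as well and that the initialisation lands in cpu_common_reset() in qom/cpu.c; neither detail is part of the posted patch.

```c
/* qom/cpu.c -- sketch only, not part of the posted patch */

static void cpu_common_reset(CPUState *cpu)
{
    /* ... existing reset code ... */

    /* A freshly reset/created vCPU is not in the middle of a LL/SC. */
    cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
    cpu->excl_protected_range.end = EXCLUSIVE_RESET_ADDR;
}

static bool cpu_common_valid_excl_access(CPUState *cpu, hwaddr addr,
                                         hwaddr size)
{
    /* The exclusive range must completely cover the access. */
    return cpu->excl_protected_range.begin <= addr &&
           cpu->excl_protected_range.end >= addr + size;
}
```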
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 2e5229d..682c81d 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -29,6 +29,7 @@
 #include "qemu/queue.h"
 #include "qemu/thread.h"
 #include "qemu/typedefs.h"
+#include "qemu/range.h"

 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                      void *opaque);
@@ -183,6 +184,12 @@ typedef struct CPUClass {
     void (*cpu_exec_exit)(CPUState *cpu);
     bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

+    /* Atomic instruction handling */
+    void (*cpu_set_excl_protected_range)(CPUState *cpu, hwaddr addr,
+                                         hwaddr size);
+    int (*cpu_valid_excl_access)(CPUState *cpu, hwaddr addr,
+                                 hwaddr size);
+
     void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
 } CPUClass;

@@ -219,6 +226,9 @@ struct kvm_run;
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

+/* Atomic insn translation TLB support. */
+#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+
 /**
  * CPUState:
  * @cpu_index: CPU index (informative).
@@ -341,6 +351,11 @@ struct CPUState {
      */
     bool throttle_thread_scheduled;

+    /* vCPU's exclusive addresses range.
+     * The address is set to EXCLUSIVE_RESET_ADDR if the vCPU is not
+     * in the middle of a LL/SC. */
+    struct Range excl_protected_range;
+
     /* Note that this is accessed at the start of every TB via a negative
        offset from AREG0. Leave this field at the end so as to make the
        (absolute value) offset as small as possible. This reduces code
diff --git a/qom/cpu.c b/qom/cpu.c
index 8f537a4..a5d360c 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -203,6 +203,24 @@ static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
     return false;
 }

+static void cpu_common_set_excl_range(CPUState *cpu, hwaddr addr, hwaddr size)
+{
+    cpu->excl_protected_range.begin = addr;
+    cpu->excl_protected_range.end = addr + size;
+}
+
+static int cpu_common_valid_excl_access(CPUState *cpu, hwaddr addr, hwaddr size)
+{
+    /* Check if the excl range completely covers the access */
+    if (cpu->excl_protected_range.begin <= addr &&
+        cpu->excl_protected_range.end >= addr + size) {
+
+        return 1;
+    }
+
+    return 0;
+}
+
 void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
 {
@@ -355,6 +373,8 @@ static void cpu_class_init(ObjectClass *klass, void *data)
     k->cpu_exec_enter = cpu_common_noop;
     k->cpu_exec_exit = cpu_common_noop;
     k->cpu_exec_interrupt = cpu_common_exec_interrupt;
+    k->cpu_set_excl_protected_range = cpu_common_set_excl_range;
+    k->cpu_valid_excl_access = cpu_common_valid_excl_access;
     dc->realize = cpu_common_realizefn;
     /*
      * Reason: CPUs still need special care by board code: wiring up
The excl_protected_range is a hwaddr range set by the VCPU at the
execution of a LoadLink instruction. If a normal access writes to this
range, the corresponding StoreCond will fail.

Each architecture can set the exclusive range when issuing the LoadLink
operation through a CPUClass hook. This comes in handy to emulate, for
instance, the exclusive monitor implemented in some ARM architectures
(more precisely, the Exclusive Reservation Granule).

In addition, add another CPUClass hook called to decide whether a
StoreCond has to fail or not.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 include/qom/cpu.h | 15 +++++++++++++++
 qom/cpu.c         | 20 ++++++++++++++++++++
 2 files changed, 35 insertions(+)
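For illustration only, a sketch of how a target's LL/SC helpers might drive the two new hooks. The helper names below are invented for this example and do not appear in the series; only CPU_GET_CLASS(), the two hook fields, and EXCLUSIVE_RESET_ADDR come from the patch, and how a real target clears the reservation is an assumption.

```c
#include "qom/cpu.h"

/* Hypothetical LoadLink helper: protect the accessed range so that a
 * conflicting store can make the matching StoreCond fail. */
static void helper_ldlink_example(CPUState *cpu, hwaddr addr, hwaddr size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->cpu_set_excl_protected_range(cpu, addr, size);
}

/* Hypothetical StoreCond helper: succeed only if the whole access is
 * still covered by the exclusive range. Returns 0 on success, 1 on
 * failure, mirroring the usual LL/SC result convention. */
static int helper_stcond_example(CPUState *cpu, hwaddr addr, hwaddr size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cc->cpu_valid_excl_access(cpu, addr, size)) {
        return 1; /* store-conditional failed */
    }

    /* ... perform the store ... */

    /* Drop the reservation; the vCPU is no longer in a LL/SC sequence
     * (assumed clean-up, not shown in this patch). */
    cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
    cpu->excl_protected_range.end = EXCLUSIVE_RESET_ADDR;
    return 0; /* success */
}
```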