
[v2,13/21] target/arm: Pass MemOp to get_phys_addr

Message ID: 20241005200600.493604-14-richard.henderson@linaro.org
State: New, archived
Series: accel/tcg: Introduce tlb_fill_align hook

Commit Message

Richard Henderson Oct. 5, 2024, 8:05 p.m. UTC
Zero is the safe do-nothing value for callers to use.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/internals.h      | 3 ++-
 target/arm/ptw.c            | 2 +-
 target/arm/tcg/m_helper.c   | 8 ++++----
 target/arm/tcg/tlb_helper.c | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)

Comments

Helge Deller Oct. 7, 2024, 9:20 p.m. UTC | #1
On 10/5/24 22:05, Richard Henderson wrote:
> Zero is the safe do-nothing value for callers to use.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>


Reviewed-by: Helge Deller <deller@gmx.de>

> ---
>   target/arm/internals.h      | 3 ++-
>   target/arm/ptw.c            | 2 +-
>   target/arm/tcg/m_helper.c   | 8 ++++----
>   target/arm/tcg/tlb_helper.c | 2 +-
>   4 files changed, 8 insertions(+), 7 deletions(-)
Peter Maydell Oct. 8, 2024, 2:45 p.m. UTC | #2
On Sat, 5 Oct 2024 at 21:06, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Zero is the safe do-nothing value for callers to use.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/internals.h      | 3 ++-
>  target/arm/ptw.c            | 2 +-
>  target/arm/tcg/m_helper.c   | 8 ++++----
>  target/arm/tcg/tlb_helper.c | 2 +-
>  4 files changed, 8 insertions(+), 7 deletions(-)

> diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
> index 23d7f73035..f7354f3c6e 100644
> --- a/target/arm/tcg/m_helper.c
> +++ b/target/arm/tcg/m_helper.c
> @@ -222,7 +222,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
>      int exc;
>      bool exc_secure;
>
> -    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
> +    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
>          /* MPU/SAU lookup failed */
>          if (fi.type == ARMFault_QEMU_SFault) {
>              if (mode == STACK_LAZYFP) {
> @@ -311,7 +311,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
>      bool exc_secure;
>      uint32_t value;
>
> -    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
> +    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
>          /* MPU/SAU lookup failed */
>          if (fi.type == ARMFault_QEMU_SFault) {
>              qemu_log_mask(CPU_LOG_INT,

We do actually know what kind of memory operation we're doing here:
it's a 4-byte access. (It should never be unaligned because an M-profile
SP can't ever be un-4-aligned, though I forget whether our implementation
really enforces that.)

> @@ -2009,7 +2009,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
>                        "...really SecureFault with SFSR.INVEP\n");
>          return false;
>      }
> -    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
> +    if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
>          /* the MPU lookup failed */
>          env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
>          armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);

Similarly, this is a 16-bit load that in theory can never be unaligned.

> @@ -2045,7 +2045,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
>      ARMMMUFaultInfo fi = {};
>      uint32_t value;
>
> -    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
> +    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
>          /* MPU/SAU lookup failed */
>          if (fi.type == ARMFault_QEMU_SFault) {
>              qemu_log_mask(CPU_LOG_INT,

and this is another 4-byte load via sp.
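
Spelled out, that would translate into something like the following sketch
(not part of the patch; a bare MO_32 or MO_16 carries no MO_ALIGN flag, so
it is functionally the same as passing 0):

    /* v7m_stack_write() / v7m_stack_read() / v7m_read_sg_stack_word():
     * 4-byte accesses via SP, so MO_32 would record the width.
     */
    if (get_phys_addr(env, addr, MMU_DATA_STORE, MO_32, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
    }

    /* v7m_read_half_insn(): a 16-bit instruction fetch, so MO_16. */
    if (get_phys_addr(env, addr, MMU_INST_FETCH, MO_16, mmu_idx, &res, &fi)) {
        /* the MPU lookup failed */
    }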

> diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
> index 885bf4ec14..1d8b7bcaa2 100644
> --- a/target/arm/tcg/tlb_helper.c
> +++ b/target/arm/tcg/tlb_helper.c
> @@ -344,7 +344,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>       * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
>       * register format, and signal the fault.
>       */
> -    ret = get_phys_addr(&cpu->env, address, access_type,
> +    ret = get_phys_addr(&cpu->env, address, access_type, 0,
>                          core_to_arm_mmu_idx(&cpu->env, mmu_idx),
>                          &res, fi);
>      if (likely(!ret)) {

thanks
-- PMM
Richard Henderson Oct. 8, 2024, 5:32 p.m. UTC | #3
On 10/8/24 07:45, Peter Maydell wrote:
> On Sat, 5 Oct 2024 at 21:06, Richard Henderson
> <richard.henderson@linaro.org> wrote:
>>
>> Zero is the safe do-nothing value for callers to use.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   target/arm/internals.h      | 3 ++-
>>   target/arm/ptw.c            | 2 +-
>>   target/arm/tcg/m_helper.c   | 8 ++++----
>>   target/arm/tcg/tlb_helper.c | 2 +-
>>   4 files changed, 8 insertions(+), 7 deletions(-)
> 
>> diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
>> index 23d7f73035..f7354f3c6e 100644
>> --- a/target/arm/tcg/m_helper.c
>> +++ b/target/arm/tcg/m_helper.c
>> @@ -222,7 +222,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
>>       int exc;
>>       bool exc_secure;
>>
>> -    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
>> +    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
>>           /* MPU/SAU lookup failed */
>>           if (fi.type == ARMFault_QEMU_SFault) {
>>               if (mode == STACK_LAZYFP) {
>> @@ -311,7 +311,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
>>       bool exc_secure;
>>       uint32_t value;
>>
>> -    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
>> +    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
>>           /* MPU/SAU lookup failed */
>>           if (fi.type == ARMFault_QEMU_SFault) {
>>               qemu_log_mask(CPU_LOG_INT,
> 
> We do actually know what kind of memory operation we're doing here:
> it's a 4-byte access. (It should never be unaligned because an M-profile
> SP can't ever be un-4-aligned, though I forget whether our implementation
> really enforces that.)
> 
>> @@ -2009,7 +2009,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
>>                         "...really SecureFault with SFSR.INVEP\n");
>>           return false;
>>       }
>> -    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
>> +    if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
>>           /* the MPU lookup failed */
>>           env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
>>           armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
> 
> Similarly, this is a 16-bit load that in theory can never be unaligned.
> 
>> @@ -2045,7 +2045,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
>>       ARMMMUFaultInfo fi = {};
>>       uint32_t value;
>>
>> -    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
>> +    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
>>           /* MPU/SAU lookup failed */
>>           if (fi.type == ARMFault_QEMU_SFault) {
>>               qemu_log_mask(CPU_LOG_INT,
> 
> and this is another 4-byte load via sp.
> 
>> diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
>> index 885bf4ec14..1d8b7bcaa2 100644
>> --- a/target/arm/tcg/tlb_helper.c
>> +++ b/target/arm/tcg/tlb_helper.c
>> @@ -344,7 +344,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>>        * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
>>        * register format, and signal the fault.
>>        */
>> -    ret = get_phys_addr(&cpu->env, address, access_type,
>> +    ret = get_phys_addr(&cpu->env, address, access_type, 0,
>>                           core_to_arm_mmu_idx(&cpu->env, mmu_idx),
>>                           &res, fi);
>>       if (likely(!ret)) {

The question is: if it should be impossible for these accesses to be misaligned, should we
pass an argument that checks alignment and then (!) potentially raises a guest exception?

I suspect the answer is no.

If it should be impossible, i.e. no alignment fault is ever visible to the guest in this
context, then we should at most assert(), and otherwise do nothing.

We *can* pass, e.g. MO_32 or MO_16 for documentation purposes, if you like.  Without 
additional adornment, this does not imply alignment enforcement (i.e. MO_ALIGN).  But this 
would be functionally indistinguishable from 0 (which I imperfectly documented with "or 0" 
in the function block comments).
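
Concretely, the distinction is between the size bits of a MemOp and the
MO_ALIGN flag. A minimal sketch, using the v7m_stack_read() call as the
example (whether an MO_ALIGN request actually produces a guest-visible
fault depends on the rest of the series, not on this patch):

    /* What this patch does: no size information, no alignment check. */
    get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi);

    /* Documentation only: records the 4-byte width, still no check. */
    get_phys_addr(env, addr, MMU_DATA_LOAD, MO_32, mmu_idx, &res, &fi);

    /* Only this form would ask for alignment to be enforced. */
    get_phys_addr(env, addr, MMU_DATA_LOAD, MO_32 | MO_ALIGN, mmu_idx, &res, &fi);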


r~
Peter Maydell Oct. 9, 2024, 1:59 p.m. UTC | #4
On Tue, 8 Oct 2024 at 18:32, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> On 10/8/24 07:45, Peter Maydell wrote:
> > On Sat, 5 Oct 2024 at 21:06, Richard Henderson
> > <richard.henderson@linaro.org> wrote:
> >>
> >> Zero is the safe do-nothing value for callers to use.
> >>
> >> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> >> ---
> >>   target/arm/internals.h      | 3 ++-
> >>   target/arm/ptw.c            | 2 +-
> >>   target/arm/tcg/m_helper.c   | 8 ++++----
> >>   target/arm/tcg/tlb_helper.c | 2 +-
> >>   4 files changed, 8 insertions(+), 7 deletions(-)
> >
> >> diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
> >> index 23d7f73035..f7354f3c6e 100644
> >> --- a/target/arm/tcg/m_helper.c
> >> +++ b/target/arm/tcg/m_helper.c
> >> @@ -222,7 +222,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
> >>       int exc;
> >>       bool exc_secure;
> >>
> >> -    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
> >> +    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
> >>           /* MPU/SAU lookup failed */
> >>           if (fi.type == ARMFault_QEMU_SFault) {
> >>               if (mode == STACK_LAZYFP) {
> >> @@ -311,7 +311,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
> >>       bool exc_secure;
> >>       uint32_t value;
> >>
> >> -    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
> >> +    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
> >>           /* MPU/SAU lookup failed */
> >>           if (fi.type == ARMFault_QEMU_SFault) {
> >>               qemu_log_mask(CPU_LOG_INT,
> >
> > We do actually know what kind of memory operation we're doing here:
> > it's a 4-byte access. (It should never be unaligned because an M-profile
> > SP can't ever be un-4-aligned, though I forget whether our implementation
> > really enforces that.)
> >
> >> @@ -2009,7 +2009,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
> >>                         "...really SecureFault with SFSR.INVEP\n");
> >>           return false;
> >>       }
> >> -    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
> >> +    if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
> >>           /* the MPU lookup failed */
> >>           env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
> >>           armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
> >
> > Similarly, this is a 16-bit load that in theory can never be unaligned.
> >
> >> @@ -2045,7 +2045,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
> >>       ARMMMUFaultInfo fi = {};
> >>       uint32_t value;
> >>
> >> -    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
> >> +    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
> >>           /* MPU/SAU lookup failed */
> >>           if (fi.type == ARMFault_QEMU_SFault) {
> >>               qemu_log_mask(CPU_LOG_INT,
> >
> > and this is another 4-byte load via sp.
> >
> >> diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
> >> index 885bf4ec14..1d8b7bcaa2 100644
> >> --- a/target/arm/tcg/tlb_helper.c
> >> +++ b/target/arm/tcg/tlb_helper.c
> >> @@ -344,7 +344,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> >>        * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
> >>        * register format, and signal the fault.
> >>        */
> >> -    ret = get_phys_addr(&cpu->env, address, access_type,
> >> +    ret = get_phys_addr(&cpu->env, address, access_type, 0,
> >>                           core_to_arm_mmu_idx(&cpu->env, mmu_idx),
> >>                           &res, fi);
> >>       if (likely(!ret)) {
>
> The question is: if it should be impossible for these accesses to be misaligned, should we
> pass an argument that checks alignment and then (!) potentially raises a guest exception?
>
> I suspect the answer is no.
>
> If it should be impossible, i.e. no alignment fault is ever visible to the guest in this
> context, then we should at most assert(), and otherwise do nothing.
>
> We *can* pass, e.g. MO_32 or MO_16 for documentation purposes, if you like.  Without
> additional adornment, this does not imply alignment enforcement (i.e. MO_ALIGN).  But this
> would be functionally indistinguishable from 0 (which I imperfectly documented with "or 0"
> in the function block comments).

Mmm. When I started reviewing this series I thought we might have a bigger
set of "we put in 0 here, but is that the right thing?" callsites. But having
worked through it, it turns out there aren't very many, and for all of them
"don't check alignment" is the right thing.

thanks
-- PMM

Patch

diff --git a/target/arm/internals.h b/target/arm/internals.h
index 1e5da81ce9..2b16579fa5 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1432,6 +1432,7 @@  typedef struct GetPhysAddrResult {
  * @env: CPUARMState
  * @address: virtual address to get physical address for
  * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
  * @mmu_idx: MMU index indicating required translation regime
  * @result: set on translation success.
  * @fi: set to fault info if the translation fails
@@ -1450,7 +1451,7 @@  typedef struct GetPhysAddrResult {
  *    value.
  */
 bool get_phys_addr(CPUARMState *env, vaddr address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 659855133c..373095a339 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -3572,7 +3572,7 @@  bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
 }
 
 bool get_phys_addr(CPUARMState *env, vaddr address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     S1Translate ptw = {
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 23d7f73035..f7354f3c6e 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -222,7 +222,7 @@  static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
     int exc;
     bool exc_secure;
 
-    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             if (mode == STACK_LAZYFP) {
@@ -311,7 +311,7 @@  static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
     bool exc_secure;
     uint32_t value;
 
-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
@@ -2009,7 +2009,7 @@  static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
                       "...really SecureFault with SFSR.INVEP\n");
         return false;
     }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
         /* the MPU lookup failed */
         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -2045,7 +2045,7 @@  static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     ARMMMUFaultInfo fi = {};
     uint32_t value;
 
-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 885bf4ec14..1d8b7bcaa2 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -344,7 +344,7 @@  bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
      * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
      * register format, and signal the fault.
      */
-    ret = get_phys_addr(&cpu->env, address, access_type,
+    ret = get_phys_addr(&cpu->env, address, access_type, 0,
                         core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                         &res, fi);
     if (likely(!ret)) {
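
For comparison, a caller that does know the access being translated could
forward a real MemOp instead of 0. The sketch below is hypothetical, not
taken from a later patch in the series; the variable names are placeholders:

    /* Hypothetical caller: memop describes the guest access being
     * translated, e.g. MO_32, or MO_32 | MO_ALIGN if an alignment
     * check is wanted as well.
     */
    ARMMMUFaultInfo fi = {};
    GetPhysAddrResult res = {};

    if (get_phys_addr(env, address, access_type, memop,
                      core_to_arm_mmu_idx(env, mmu_idx),
                      &res, &fi)) {
        /* handle the fault described by fi */
    }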