
[3/3] target/arm: Handle trapping to EL2 of AArch32 VMRS instructions

Message ID 20191128161718.24361-4-maz@kernel.org (mailing list archive)
State New, archived
Series target/arm: More HCR_EL2.TIDx fixes

Commit Message

Marc Zyngier Nov. 28, 2019, 4:17 p.m. UTC
HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
In order to handle this, introduce a new TCG helper function that
checks for these control bits before executing the VMRS instruction.

Tested with a hacked-up version of KVM/arm64 that sets the control
bits for 32bit guests.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 target/arm/helper-a64.h        |  2 ++
 target/arm/internals.h         |  8 ++++++++
 target/arm/translate-vfp.inc.c | 12 +++++++++---
 target/arm/vfp_helper.c        | 27 +++++++++++++++++++++++++++
 4 files changed, 46 insertions(+), 3 deletions(-)

Comments

Peter Maydell Nov. 28, 2019, 4:43 p.m. UTC | #1
On Thu, 28 Nov 2019 at 16:17, Marc Zyngier <maz@kernel.org> wrote:
>
> HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
> EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
> In order to handle this, introduce a new TCG helper function that
> checks for these control bits before executing the VMRS instruction.
>
> Tested with a hacked-up version of KVM/arm64 that sets the control
> bits for 32bit guests.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  target/arm/helper-a64.h        |  2 ++
>  target/arm/internals.h         |  8 ++++++++
>  target/arm/translate-vfp.inc.c | 12 +++++++++---
>  target/arm/vfp_helper.c        | 27 +++++++++++++++++++++++++++
>  4 files changed, 46 insertions(+), 3 deletions(-)
>
> diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
> index a915c1247f..311ced44e6 100644
> --- a/target/arm/helper-a64.h
> +++ b/target/arm/helper-a64.h
> @@ -102,3 +102,5 @@ DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
>  DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
>  DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
>  DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
> +
> +DEF_HELPER_3(check_hcr_el2_trap, void, env, int, int)
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index f5313dd3d4..5a55e960de 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -430,6 +430,14 @@ static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
>          | (cv << 24) | (cond << 20) | (1 << 5);
>  }
>
> +static inline uint32_t syn_vmrs_trap(int rt, int reg)
> +{
> +    return (EC_FPIDTRAP << ARM_EL_EC_SHIFT)
> +        | ARM_EL_IL
> +        | (1 << 24) | (0xe << 20) | (7 << 14)
> +        | (reg << 10) | (rt << 5) | 1;
> +}
> +
>  static inline uint32_t syn_sve_access_trap(void)
>  {
>      return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
> diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
> index 85c5ef897b..4c435b6c35 100644
> --- a/target/arm/translate-vfp.inc.c
> +++ b/target/arm/translate-vfp.inc.c
> @@ -759,15 +759,21 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
>      }
>
>      if (a->l) {
> +        TCGv_i32 tcg_rt, tcg_reg;
> +
>          /* VMRS, move VFP special register to gp register */
>          switch (a->reg) {
> +        case ARM_VFP_MVFR0:
> +        case ARM_VFP_MVFR1:
> +        case ARM_VFP_MVFR2:
>          case ARM_VFP_FPSID:
> +            tcg_rt = tcg_const_i32(a->rt);
> +            tcg_reg = tcg_const_i32(a->reg);

Since the syndrome value depends only on these two things,
you might as well generate the full syndrome value at
translate time rather than doing it at runtime; then
you only need to pass one thing through to the helper rather
than two.
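
Something like this, for instance (rough sketch, untested; it assumes
the helper is redeclared as DEF_HELPER_2(check_hcr_el2_trap, void, env, i32)
and takes the precomputed syndrome):

            TCGv_i32 tcg_syn = tcg_const_i32(syn_vmrs_trap(a->rt, a->reg));
            gen_helper_check_hcr_el2_trap(cpu_env, tcg_syn);
            tcg_temp_free_i32(tcg_syn);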

> +            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);

This helper call is potentially going to throw an exception
at runtime. QEMU's JIT doesn't write back all the state
of the CPU to the CPU state structure fields for helper
calls, so to avoid losing non-written-back state there are
two possible approaches:

(1) manually write back the state before the call; for
aarch32 this looks like
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
(you can see this done before we call the access_check_cp_reg()
helper, for instance)

(2) in the helper function, instead of raise_exception(),
call raise_exception_ra(..., GETPC())
This says "when we take the exception, also re-sync the
CPU state by looking at the host PC value in the JITted
code (ie the address of the callsite of the helper) and
looking through a table for this translation block that
cross-references the host PC against the guest PC and
condexec values for that point in execution".
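
For this helper that would just mean the final line becomes something
like (sketch only; this assumes the raise_exception_ra() prototype we
already have in internals.h, which takes the host return address last):

    /* re-sync guest PC/condexec state from the JITted callsite */
    raise_exception_ra(env, EXCP_HYP_TRAP,
                       syn_vmrs_trap(rt, reg), 2, GETPC());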

Option 1 is better if the expectation is that the trap will
be taken always, often or usually; option 2 is what we
use if the trap is unlikely (it's how we handle
exceptions on guest load/store insns, which are the main
reason we have the mechanism at all).

Since it's unlikely that guest code will be doing ID
register accesses in hot codepaths, I'd go with option 1,
mostly just for consistency with how we do coprocessor
register access-check function calls.
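
Concretely, option 1 would mean syncing the state just before your
helper call in trans_VMSR_VMRS(), something like (untested sketch):

            /* write back PC and IT-block state so the helper can
             * safely raise an exception at runtime
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
            tcg_rt = tcg_const_i32(a->rt);
            tcg_reg = tcg_const_i32(a->reg);
            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);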

> +            /* fall through */
>          case ARM_VFP_FPEXC:
>          case ARM_VFP_FPINST:
>          case ARM_VFP_FPINST2:
> -        case ARM_VFP_MVFR0:
> -        case ARM_VFP_MVFR1:
> -        case ARM_VFP_MVFR2:
>              tmp = load_cpu_field(vfp.xregs[a->reg]);
>              break;
>          case ARM_VFP_FPSCR:
> diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
> index 9710ef1c3e..44e538e51c 100644
> --- a/target/arm/vfp_helper.c
> +++ b/target/arm/vfp_helper.c
> @@ -1322,4 +1322,31 @@ float64 HELPER(frint64_d)(float64 f, void *fpst)
>      return frint_d(f, fpst, 64);
>  }
>
> +void HELPER(check_hcr_el2_trap)(CPUARMState *env, int rt, int reg)
> +{
> +    if (arm_current_el(env) != 1) {
> +        return;
> +    }
> +
> +    switch (reg) {
> +    case ARM_VFP_MVFR0:
> +    case ARM_VFP_MVFR1:
> +    case ARM_VFP_MVFR2:
> +        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
> +            return;
> +        }
> +        break;
> +    case ARM_VFP_FPSID:
> +        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
> +            return;
> +        }
> +        break;
> +    default:
> +        /* Shouldn't be here... */
> +        return;

We usually write 'impossible' default cases as:
           g_assert_not_reached();

> +    }
> +
> +    raise_exception(env, EXCP_HYP_TRAP, syn_vmrs_trap(rt, reg), 2);
> +}
> +
>  #endif

thanks
-- PMM
Marc Zyngier Nov. 28, 2019, 5:49 p.m. UTC | #2
Hi Peter,

Thanks for having a look at this.

On 2019-11-28 16:43, Peter Maydell wrote:
> On Thu, 28 Nov 2019 at 16:17, Marc Zyngier <maz@kernel.org> wrote:
>>
>> HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
>> EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
>> In order to handle this, introduce a new TCG helper function that
>> checks for these control bits before executing the VMRS instruction.
>>
>> Tested with a hacked-up version of KVM/arm64 that sets the control
>> bits for 32bit guests.
>>
>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>> ---
>>  target/arm/helper-a64.h        |  2 ++
>>  target/arm/internals.h         |  8 ++++++++
>>  target/arm/translate-vfp.inc.c | 12 +++++++++---
>>  target/arm/vfp_helper.c        | 27 +++++++++++++++++++++++++++
>>  4 files changed, 46 insertions(+), 3 deletions(-)
>>
>> diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
>> index a915c1247f..311ced44e6 100644
>> --- a/target/arm/helper-a64.h
>> +++ b/target/arm/helper-a64.h
>> @@ -102,3 +102,5 @@ DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, 
>> env, i64, i64)
>>  DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
>>  DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
>>  DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
>> +
>> +DEF_HELPER_3(check_hcr_el2_trap, void, env, int, int)
>> diff --git a/target/arm/internals.h b/target/arm/internals.h
>> index f5313dd3d4..5a55e960de 100644
>> --- a/target/arm/internals.h
>> +++ b/target/arm/internals.h
>> @@ -430,6 +430,14 @@ static inline uint32_t syn_simd_access_trap(int 
>> cv, int cond, bool is_16bit)
>>          | (cv << 24) | (cond << 20) | (1 << 5);
>>  }
>>
>> +static inline uint32_t syn_vmrs_trap(int rt, int reg)
>> +{
>> +    return (EC_FPIDTRAP << ARM_EL_EC_SHIFT)
>> +        | ARM_EL_IL
>> +        | (1 << 24) | (0xe << 20) | (7 << 14)
>> +        | (reg << 10) | (rt << 5) | 1;
>> +}
>> +
>>  static inline uint32_t syn_sve_access_trap(void)
>>  {
>>      return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
>> diff --git a/target/arm/translate-vfp.inc.c 
>> b/target/arm/translate-vfp.inc.c
>> index 85c5ef897b..4c435b6c35 100644
>> --- a/target/arm/translate-vfp.inc.c
>> +++ b/target/arm/translate-vfp.inc.c
>> @@ -759,15 +759,21 @@ static bool trans_VMSR_VMRS(DisasContext *s, 
>> arg_VMSR_VMRS *a)
>>      }
>>
>>      if (a->l) {
>> +        TCGv_i32 tcg_rt, tcg_reg;
>> +
>>          /* VMRS, move VFP special register to gp register */
>>          switch (a->reg) {
>> +        case ARM_VFP_MVFR0:
>> +        case ARM_VFP_MVFR1:
>> +        case ARM_VFP_MVFR2:
>>          case ARM_VFP_FPSID:
>> +            tcg_rt = tcg_const_i32(a->rt);
>> +            tcg_reg = tcg_const_i32(a->reg);
>
> Since the syndrome value depends only on these two things,
> you might as well generate the full syndrome value at
> translate time rather than doing it at runtime; then
> you only need to pass one thing through to the helper rather
> than two.

OK. This means that the register check in check_hcr_el2_trap
will need to extract the register value from the syndrome.
Not a big deal, but maybe slightly less readable.
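
(Given that syn_vmrs_trap() puts the register number (CRn) at bits
[13:10], that's presumably just something along the lines of

     int reg = extract32(syndrome, 10, 4);

using extract32() from qemu/bitops.h.)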

>
>> +            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, 
>> tcg_reg);
>
> This helper call is potentially going to throw an exception
> at runtime. QEMU's JIT doesn't write back all the state
> of the CPU to the CPU state structure fields for helper
> calls, so to avoid losing non-written-back state there are
> two possible approaches:
>
> (1) manually write back the state before the call; for
> aarch32 this looks like
>             gen_set_condexec(s);
>             gen_set_pc_im(s, s->pc_curr);
> (you can see this done before we call the access_check_cp_reg()
> helper, for instance)
>
> (2) in the helper function, instead of raise_exception(),
> call raise_exception_ra(..., GETPC())
> This says "when we take the exception, also re-sync the
> CPU state by looking at the host PC value in the JITted
> code (ie the address of the callsite of the helper) and
> looking through a table for this translation block that
> cross-references the host PC against the guest PC and
> condexec values for that point in execution".
>
> Option 1 is better if the expectation is that the trap will
> be taken always, often or usually; option 2 is what we
> use if the trap is unlikely (it's how we handle
> exceptions on guest load/store insns, which are the main
> reason we have the mechanism at all).
>
> Since it's unlikely that guest code will be doing ID
> register accesses in hot codepaths, I'd go with option 1,
> mostly just for consistency with how we do coprocessor
> register access-check function calls.

Ah, very interesting stuff. There is a lot of "magic" happening
in QEMU, and I wondered about the emulated state at some point,
until I forgot about it!

On a vaguely tangential subject, how are conditional instructions
JIT-ed? I could perfectly imagine a conditional VMRS instruction,
but none of the code I looked at seems to care about it. Or is
that done before the access itself is actually emitted?

>
>> +            /* fall through */
>>          case ARM_VFP_FPEXC:
>>          case ARM_VFP_FPINST:
>>          case ARM_VFP_FPINST2:
>> -        case ARM_VFP_MVFR0:
>> -        case ARM_VFP_MVFR1:
>> -        case ARM_VFP_MVFR2:
>>              tmp = load_cpu_field(vfp.xregs[a->reg]);
>>              break;
>>          case ARM_VFP_FPSCR:
>> diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
>> index 9710ef1c3e..44e538e51c 100644
>> --- a/target/arm/vfp_helper.c
>> +++ b/target/arm/vfp_helper.c
>> @@ -1322,4 +1322,31 @@ float64 HELPER(frint64_d)(float64 f, void 
>> *fpst)
>>      return frint_d(f, fpst, 64);
>>  }
>>
>> +void HELPER(check_hcr_el2_trap)(CPUARMState *env, int rt, int reg)
>> +{
>> +    if (arm_current_el(env) != 1) {
>> +        return;
>> +    }
>> +
>> +    switch (reg) {
>> +    case ARM_VFP_MVFR0:
>> +    case ARM_VFP_MVFR1:
>> +    case ARM_VFP_MVFR2:
>> +        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
>> +            return;
>> +        }
>> +        break;
>> +    case ARM_VFP_FPSID:
>> +        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
>> +            return;
>> +        }
>> +        break;
>> +    default:
>> +        /* Shouldn't be here... */
>> +        return;
>
> We usually write 'impossible' default cases as:
>            g_assert_not_reached();

Noted, thanks.

I'll wait a bit for additional reviews (if any), and then repost the
series with these fixes in.

Thanks,

         M.
Peter Maydell Nov. 28, 2019, 6:06 p.m. UTC | #3
On Thu, 28 Nov 2019 at 17:49, Marc Zyngier <maz@kernel.org> wrote:
>
> Hi Peter,
>
> Thanks for having a look at this.
>
> On 2019-11-28 16:43, Peter Maydell wrote:
> > On Thu, 28 Nov 2019 at 16:17, Marc Zyngier <maz@kernel.org> wrote:
> >>
> >> HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
> >> EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
> >> In order to handle this, introduce a new TCG helper function that
> >> checks for these control bits before executing the VMRS instruction.
> >>
> >> Tested with a hacked-up version of KVM/arm64 that sets the control
> >> bits for 32bit guests.
> >>
> >> Signed-off-by: Marc Zyngier <maz@kernel.org>

> > Since the syndrome value depends only on these two things,
> > you might as well generate the full syndrome value at
> > translate time rather than doing it at runtime; then
> > you only need to pass one thing through to the helper rather
> > than two.
>
> OK. This means that the register check in check_hcr_el2_trap
> will need to extract the register value from the syndrome.
> Not a big deal, but maybe slightly less readable.

Oops, I hadn't noticed that we were switching on reg.
Yeah, you might as well leave it as is. (We could have
a separate helper for each of TID0 and TID3 but that
seems like overkill.)

> On a vaguely tangential subject, how are conditional instructions
> JIT-ed? I could perfectly imagine a conditional VMRS instruction,
> but none of the code I looked at seems to care about it. Or is
> that done before the access itself is actually emitted?

Arm conditional instructions are handled at a pretty
high level in the decode, because they all work the same way.
In disas_arm_insn() we have:

    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        arm_skip_unless(s, cond);
    }

and there's something similar in thumb_tr_translate_insn()
which puts in a branch based on the thumb condexec bits.
The target of the branch is a label whose position is
set either in arm_post_translate_insn() after the code for the
insn is emitted, or in arm_tr_tb_stop() if the insn is
the last in the TB (always true for branch or trap insns).

thanks
-- PMM
Edgar E. Iglesias Nov. 29, 2019, 8:28 a.m. UTC | #4
On Thu, Nov 28, 2019 at 04:17:18PM +0000, Marc Zyngier wrote:
> HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
> EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
> In order to handle this, introduce a new TCG helper function that
> checks for these control bits before executing the VMRS instruction.
> 
> Tested with a hacked-up version of KVM/arm64 that sets the control
> bits for 32bit guests.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  target/arm/helper-a64.h        |  2 ++
>  target/arm/internals.h         |  8 ++++++++
>  target/arm/translate-vfp.inc.c | 12 +++++++++---
>  target/arm/vfp_helper.c        | 27 +++++++++++++++++++++++++++
>  4 files changed, 46 insertions(+), 3 deletions(-)
> 
> diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
> index a915c1247f..311ced44e6 100644
> --- a/target/arm/helper-a64.h
> +++ b/target/arm/helper-a64.h
> @@ -102,3 +102,5 @@ DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
>  DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
>  DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
>  DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
> +
> +DEF_HELPER_3(check_hcr_el2_trap, void, env, int, int)
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index f5313dd3d4..5a55e960de 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -430,6 +430,14 @@ static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
>          | (cv << 24) | (cond << 20) | (1 << 5);
>  }
>  
> +static inline uint32_t syn_vmrs_trap(int rt, int reg)
> +{
> +    return (EC_FPIDTRAP << ARM_EL_EC_SHIFT)
> +        | ARM_EL_IL
> +        | (1 << 24) | (0xe << 20) | (7 << 14)
> +        | (reg << 10) | (rt << 5) | 1;
> +}
> +
>  static inline uint32_t syn_sve_access_trap(void)
>  {
>      return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
> diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
> index 85c5ef897b..4c435b6c35 100644
> --- a/target/arm/translate-vfp.inc.c
> +++ b/target/arm/translate-vfp.inc.c
> @@ -759,15 +759,21 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
>      }
>  
>      if (a->l) {
> +        TCGv_i32 tcg_rt, tcg_reg;
> +
>          /* VMRS, move VFP special register to gp register */
>          switch (a->reg) {
> +        case ARM_VFP_MVFR0:
> +        case ARM_VFP_MVFR1:
> +        case ARM_VFP_MVFR2:
>          case ARM_VFP_FPSID:
> +            tcg_rt = tcg_const_i32(a->rt);
> +            tcg_reg = tcg_const_i32(a->reg);
> +            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
> +            /* fall through */
>          case ARM_VFP_FPEXC:
>          case ARM_VFP_FPINST:
>          case ARM_VFP_FPINST2:
> -        case ARM_VFP_MVFR0:
> -        case ARM_VFP_MVFR1:
> -        case ARM_VFP_MVFR2:
>              tmp = load_cpu_field(vfp.xregs[a->reg]);
>              break;
>          case ARM_VFP_FPSCR:
> diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
> index 9710ef1c3e..44e538e51c 100644
> --- a/target/arm/vfp_helper.c
> +++ b/target/arm/vfp_helper.c
> @@ -1322,4 +1322,31 @@ float64 HELPER(frint64_d)(float64 f, void *fpst)
>      return frint_d(f, fpst, 64);
>  }
>  
> +void HELPER(check_hcr_el2_trap)(CPUARMState *env, int rt, int reg)
> +{
> +    if (arm_current_el(env) != 1) {
> +        return;
> +    }

I think we could move the EL1 check to translation time, couldn't we?

Other than that:
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>



> +
> +    switch (reg) {
> +    case ARM_VFP_MVFR0:
> +    case ARM_VFP_MVFR1:
> +    case ARM_VFP_MVFR2:
> +        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
> +            return;
> +        }
> +        break;
> +    case ARM_VFP_FPSID:
> +        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
> +            return;
> +        }
> +        break;
> +    default:
> +        /* Shouldn't be here... */
> +        return;
> +    }
> +
> +    raise_exception(env, EXCP_HYP_TRAP, syn_vmrs_trap(rt, reg), 2);
> +}
> +
>  #endif
> -- 
> 2.20.1
> 
>
Marc Zyngier Nov. 29, 2019, 9:24 a.m. UTC | #5
On 2019-11-29 08:28, Edgar E. Iglesias wrote:
> On Thu, Nov 28, 2019 at 04:17:18PM +0000, Marc Zyngier wrote:
>> HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
>> EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
>> In order to handle this, introduce a new TCG helper function that
>> checks for these control bits before executing the VMRS instruction.
>>
>> Tested with a hacked-up version of KVM/arm64 that sets the control
>> bits for 32bit guests.
>>
>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>> ---
>>  target/arm/helper-a64.h        |  2 ++
>>  target/arm/internals.h         |  8 ++++++++
>>  target/arm/translate-vfp.inc.c | 12 +++++++++---
>>  target/arm/vfp_helper.c        | 27 +++++++++++++++++++++++++++
>>  4 files changed, 46 insertions(+), 3 deletions(-)
>>
>> diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
>> index a915c1247f..311ced44e6 100644
>> --- a/target/arm/helper-a64.h
>> +++ b/target/arm/helper-a64.h
>> @@ -102,3 +102,5 @@ DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, 
>> env, i64, i64)
>>  DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
>>  DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
>>  DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
>> +
>> +DEF_HELPER_3(check_hcr_el2_trap, void, env, int, int)
>> diff --git a/target/arm/internals.h b/target/arm/internals.h
>> index f5313dd3d4..5a55e960de 100644
>> --- a/target/arm/internals.h
>> +++ b/target/arm/internals.h
>> @@ -430,6 +430,14 @@ static inline uint32_t syn_simd_access_trap(int 
>> cv, int cond, bool is_16bit)
>>          | (cv << 24) | (cond << 20) | (1 << 5);
>>  }
>>
>> +static inline uint32_t syn_vmrs_trap(int rt, int reg)
>> +{
>> +    return (EC_FPIDTRAP << ARM_EL_EC_SHIFT)
>> +        | ARM_EL_IL
>> +        | (1 << 24) | (0xe << 20) | (7 << 14)
>> +        | (reg << 10) | (rt << 5) | 1;
>> +}
>> +
>>  static inline uint32_t syn_sve_access_trap(void)
>>  {
>>      return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
>> diff --git a/target/arm/translate-vfp.inc.c 
>> b/target/arm/translate-vfp.inc.c
>> index 85c5ef897b..4c435b6c35 100644
>> --- a/target/arm/translate-vfp.inc.c
>> +++ b/target/arm/translate-vfp.inc.c
>> @@ -759,15 +759,21 @@ static bool trans_VMSR_VMRS(DisasContext *s, 
>> arg_VMSR_VMRS *a)
>>      }
>>
>>      if (a->l) {
>> +        TCGv_i32 tcg_rt, tcg_reg;
>> +
>>          /* VMRS, move VFP special register to gp register */
>>          switch (a->reg) {
>> +        case ARM_VFP_MVFR0:
>> +        case ARM_VFP_MVFR1:
>> +        case ARM_VFP_MVFR2:
>>          case ARM_VFP_FPSID:
>> +            tcg_rt = tcg_const_i32(a->rt);
>> +            tcg_reg = tcg_const_i32(a->reg);
>> +            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, 
>> tcg_reg);
>> +            /* fall through */
>>          case ARM_VFP_FPEXC:
>>          case ARM_VFP_FPINST:
>>          case ARM_VFP_FPINST2:
>> -        case ARM_VFP_MVFR0:
>> -        case ARM_VFP_MVFR1:
>> -        case ARM_VFP_MVFR2:
>>              tmp = load_cpu_field(vfp.xregs[a->reg]);
>>              break;
>>          case ARM_VFP_FPSCR:
>> diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
>> index 9710ef1c3e..44e538e51c 100644
>> --- a/target/arm/vfp_helper.c
>> +++ b/target/arm/vfp_helper.c
>> @@ -1322,4 +1322,31 @@ float64 HELPER(frint64_d)(float64 f, void 
>> *fpst)
>>      return frint_d(f, fpst, 64);
>>  }
>>
>> +void HELPER(check_hcr_el2_trap)(CPUARMState *env, int rt, int reg)
>> +{
>> +    if (arm_current_el(env) != 1) {
>> +        return;
>> +    }
>
> I think we could move the EL1 check to translation time, couldn't we?

I think that depends on whether the translated code is tagged by EL
or not, or on whether an exception entry (and exception return)
invalidates the JIT-ed code (and in this case it would have to be
CPU-private).

I can perfectly imagine the same piece of code being executed both
at EL0 and EL1, and this would fail if it was executed using the
same JIT-ed code.

So if QEMU gives us the above as a guarantee, we're good. Otherwise,
we need this check. How can I find out?

Thanks,

         M.
Edgar E. Iglesias Nov. 29, 2019, 9:45 a.m. UTC | #6
On Fri, Nov 29, 2019 at 09:24:37AM +0000, Marc Zyngier wrote:
> On 2019-11-29 08:28, Edgar E. Iglesias wrote:
> > On Thu, Nov 28, 2019 at 04:17:18PM +0000, Marc Zyngier wrote:
> > > HCR_EL2.TID3 requires that AArch32 reads of MVFR[012] are trapped to
> > > EL2, and that HCR_EL2.TID0 does the same for reads of FPSID.
> > > In order to handle this, introduce a new TCG helper function that
> > > checks for these control bits before executing the VMRS instruction.
> > > 
> > > Tested with a hacked-up version of KVM/arm64 that sets the control
> > > bits for 32bit guests.
> > > 
> > > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > > ---
> > >  target/arm/helper-a64.h        |  2 ++
> > >  target/arm/internals.h         |  8 ++++++++
> > >  target/arm/translate-vfp.inc.c | 12 +++++++++---
> > >  target/arm/vfp_helper.c        | 27 +++++++++++++++++++++++++++
> > >  4 files changed, 46 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
> > > index a915c1247f..311ced44e6 100644
> > > --- a/target/arm/helper-a64.h
> > > +++ b/target/arm/helper-a64.h
> > > @@ -102,3 +102,5 @@ DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64,
> > > env, i64, i64)
> > >  DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
> > >  DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
> > >  DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
> > > +
> > > +DEF_HELPER_3(check_hcr_el2_trap, void, env, int, int)
> > > diff --git a/target/arm/internals.h b/target/arm/internals.h
> > > index f5313dd3d4..5a55e960de 100644
> > > --- a/target/arm/internals.h
> > > +++ b/target/arm/internals.h
> > > @@ -430,6 +430,14 @@ static inline uint32_t syn_simd_access_trap(int
> > > cv, int cond, bool is_16bit)
> > >          | (cv << 24) | (cond << 20) | (1 << 5);
> > >  }
> > > 
> > > +static inline uint32_t syn_vmrs_trap(int rt, int reg)
> > > +{
> > > +    return (EC_FPIDTRAP << ARM_EL_EC_SHIFT)
> > > +        | ARM_EL_IL
> > > +        | (1 << 24) | (0xe << 20) | (7 << 14)
> > > +        | (reg << 10) | (rt << 5) | 1;
> > > +}
> > > +
> > >  static inline uint32_t syn_sve_access_trap(void)
> > >  {
> > >      return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
> > > diff --git a/target/arm/translate-vfp.inc.c
> > > b/target/arm/translate-vfp.inc.c
> > > index 85c5ef897b..4c435b6c35 100644
> > > --- a/target/arm/translate-vfp.inc.c
> > > +++ b/target/arm/translate-vfp.inc.c
> > > @@ -759,15 +759,21 @@ static bool trans_VMSR_VMRS(DisasContext *s,
> > > arg_VMSR_VMRS *a)
> > >      }
> > > 
> > >      if (a->l) {
> > > +        TCGv_i32 tcg_rt, tcg_reg;
> > > +
> > >          /* VMRS, move VFP special register to gp register */
> > >          switch (a->reg) {
> > > +        case ARM_VFP_MVFR0:
> > > +        case ARM_VFP_MVFR1:
> > > +        case ARM_VFP_MVFR2:
> > >          case ARM_VFP_FPSID:
> > > +            tcg_rt = tcg_const_i32(a->rt);
> > > +            tcg_reg = tcg_const_i32(a->reg);
> > > +            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt,
> > > tcg_reg);
> > > +            /* fall through */
> > >          case ARM_VFP_FPEXC:
> > >          case ARM_VFP_FPINST:
> > >          case ARM_VFP_FPINST2:
> > > -        case ARM_VFP_MVFR0:
> > > -        case ARM_VFP_MVFR1:
> > > -        case ARM_VFP_MVFR2:
> > >              tmp = load_cpu_field(vfp.xregs[a->reg]);
> > >              break;
> > >          case ARM_VFP_FPSCR:
> > > diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
> > > index 9710ef1c3e..44e538e51c 100644
> > > --- a/target/arm/vfp_helper.c
> > > +++ b/target/arm/vfp_helper.c
> > > @@ -1322,4 +1322,31 @@ float64 HELPER(frint64_d)(float64 f, void
> > > *fpst)
> > >      return frint_d(f, fpst, 64);
> > >  }
> > > 
> > > +void HELPER(check_hcr_el2_trap)(CPUARMState *env, int rt, int reg)
> > > +{
> > > +    if (arm_current_el(env) != 1) {
> > > +        return;
> > > +    }
> > 
> > I think we could move the EL1 check to translation time, couldn't we?
> 
> I think that depends on whether the translated code is tagged by EL
> or not, or on whether an exception entry (and exception return)
> invalidates the JIT-ed code (and in this case it would have to be
> CPU-private).
> 
> I can perfectly imagine the same piece of code being executed both
> at EL0 and EL1, and this would fail if it was executed using the
> same JIT-ed code.
> 
> So if QEMU gives us the above as a guarantee, we're good. Otherwise,
> we need this check. How can I find out?

Hi Marc,

IIRC, the current EL was always known at translation time but I've
not been tracking recent changes.

There are several ways to check this; one way is to look in
cpu_get_tb_cpu_state() and see if the state needed to extract the
EL goes into the TB flags.

Another way is to look in arm_tr_init_disas_context() and see what gets
extracted from the tb_flags just before translating a block.

From arm_tr_init_disas_context():
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);

Unless I'm missing something it's still there, so I think this could be
done at translation time. Peter?
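
So I'd expect the translate-time version to be as simple as something
like this (untested sketch, using that existing field):

            if (s->current_el == 1) {
                /* the runtime check only fires at EL1, so only emit it there */
                tcg_rt = tcg_const_i32(a->rt);
                tcg_reg = tcg_const_i32(a->reg);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
            }

and then the arm_current_el() check in the helper itself could be
dropped (or turned into an assert).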

Cheers,
Edgar
Peter Maydell Nov. 29, 2019, 9:51 a.m. UTC | #7
On Fri, 29 Nov 2019 at 09:45, Edgar E. Iglesias
<edgar.iglesias@gmail.com> wrote:
> IIRC, the current EL was always known at translation time but I've
> not been tracking recent changes.

Yes, it's known at translate time, in dc->current_el.
(The code is structured to make it difficult to accidentally
use info that's not known at translate-time: most translate.c
code only has access to the DisasContext struct, and that
struct only has info that is safe to use.)

We need to know the EL at translate time anyway because we
need to generate the right kind of guest load/store, where
the code generated is different for different ELs (they
get looked up in different TLBs because the access
permissions can differ).

thanks
-- PMM

Patch

diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index a915c1247f..311ced44e6 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -102,3 +102,5 @@  DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+DEF_HELPER_3(check_hcr_el2_trap, void, env, int, int)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index f5313dd3d4..5a55e960de 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -430,6 +430,14 @@  static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
         | (cv << 24) | (cond << 20) | (1 << 5);
 }
 
+static inline uint32_t syn_vmrs_trap(int rt, int reg)
+{
+    return (EC_FPIDTRAP << ARM_EL_EC_SHIFT)
+        | ARM_EL_IL
+        | (1 << 24) | (0xe << 20) | (7 << 14)
+        | (reg << 10) | (rt << 5) | 1;
+}
+
 static inline uint32_t syn_sve_access_trap(void)
 {
     return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 85c5ef897b..4c435b6c35 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -759,15 +759,21 @@  static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
     }
 
     if (a->l) {
+        TCGv_i32 tcg_rt, tcg_reg;
+
         /* VMRS, move VFP special register to gp register */
         switch (a->reg) {
+        case ARM_VFP_MVFR0:
+        case ARM_VFP_MVFR1:
+        case ARM_VFP_MVFR2:
         case ARM_VFP_FPSID:
+            tcg_rt = tcg_const_i32(a->rt);
+            tcg_reg = tcg_const_i32(a->reg);
+            gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
+            /* fall through */
         case ARM_VFP_FPEXC:
         case ARM_VFP_FPINST:
         case ARM_VFP_FPINST2:
-        case ARM_VFP_MVFR0:
-        case ARM_VFP_MVFR1:
-        case ARM_VFP_MVFR2:
             tmp = load_cpu_field(vfp.xregs[a->reg]);
             break;
         case ARM_VFP_FPSCR:
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index 9710ef1c3e..44e538e51c 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -1322,4 +1322,31 @@  float64 HELPER(frint64_d)(float64 f, void *fpst)
     return frint_d(f, fpst, 64);
 }
 
+void HELPER(check_hcr_el2_trap)(CPUARMState *env, int rt, int reg)
+{
+    if (arm_current_el(env) != 1) {
+        return;
+    }
+
+    switch (reg) {
+    case ARM_VFP_MVFR0:
+    case ARM_VFP_MVFR1:
+    case ARM_VFP_MVFR2:
+        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
+            return;
+        }
+        break;
+    case ARM_VFP_FPSID:
+        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
+            return;
+        }
+        break;
+    default:
+        /* Shouldn't be here... */
+        return;
+    }
+
+    raise_exception(env, EXCP_HYP_TRAP, syn_vmrs_trap(rt, reg), 2);
+}
+
 #endif