Message ID | 20240613201756.3258227-4-oliver.upton@linux.dev (mailing list archive)
---|---
State | New, archived
Series | KVM: arm64: nv: FPSIMD/SVE, plus some other CPTR goodies
On Thu, 13 Jun 2024 21:17:44 +0100,
Oliver Upton <oliver.upton@linux.dev> wrote:
>
> From: Marc Zyngier <maz@kernel.org>
>
> Handle CPACR_EL1 accesses when running a VHE guest. In order to
> limit the cost of the emulation, implement it as a shallow exit.
>
> In the other cases:
>
> - this is an nVHE L1 which will write to memory, and we don't trap
>
> - this is an L2 guest:
>
>   * the L1 has CPTR_EL2.TCPAC==0, and the L2 has direct register
>     access
>
>   * the L1 has CPTR_EL2.TCPAC==1, and the L2 will trap, but the
>     handling is deferred to the general handling for forwarding
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
>  arch/arm64/kvm/hyp/vhe/switch.c | 32 +++++++++++++++++++++++++++++++-
>  1 file changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index d7af5f46f22a..fed36457fef9 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -262,10 +262,40 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return true;
>  }
>
> +static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
> +{
> +	u64 esr = kvm_vcpu_get_esr(vcpu);
> +	int rt;
> +
> +	if (!is_hyp_ctxt(vcpu) || esr_sys64_to_sysreg(esr) != SYS_CPACR_EL1)
> +		return false;
> +
> +	rt = kvm_vcpu_sys_get_rt(vcpu);
> +
> +	if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ) {
> +		vcpu_set_reg(vcpu, rt, __vcpu_sys_reg(vcpu, CPTR_EL2));
> +	} else {
> +		vcpu_write_sys_reg(vcpu, vcpu_get_reg(vcpu, rt), CPTR_EL2);
> +		__activate_cptr_traps(vcpu);

This doesn't bisect, as this helper is only introduced in patch #10.
You probably want to keep it towards the end of the series.

Thanks,

	M.
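[Editor's note: for readers following the case split in the quoted commit
message, here is an illustrative sketch of the trap routing it describes.
This is not kernel code; the enum and function names are hypothetical.]

/*
 * Illustrative sketch only -- not kernel code. Models who ends up
 * handling a CPACR_EL1 access, per the commit message's case split.
 */
enum cpacr_route {
	ROUTE_SHALLOW_EXIT,	/* VHE L1 hyp context: emulated in the fast path */
	ROUTE_NO_TRAP,		/* nVHE L1, or L2 with CPTR_EL2.TCPAC == 0 */
	ROUTE_FORWARD_TO_L1,	/* L2 with CPTR_EL2.TCPAC == 1: forwarded to L1 */
};

static enum cpacr_route route_cpacr_access(bool is_hyp_ctxt, bool l1_is_vhe,
					   bool l1_tcpac)
{
	/* L1 hypervisor context: a VHE L1 takes the shallow exit,
	 * an nVHE L1 writes to memory without trapping. */
	if (is_hyp_ctxt)
		return l1_is_vhe ? ROUTE_SHALLOW_EXIT : ROUTE_NO_TRAP;

	/* L2 guest: trap and forward only if the L1 asked for it. */
	return l1_tcpac ? ROUTE_FORWARD_TO_L1 : ROUTE_NO_TRAP;
}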
On Fri, Jun 14, 2024 at 02:20:54PM +0100, Marc Zyngier wrote:
> On Thu, 13 Jun 2024 21:17:44 +0100,
> Oliver Upton <oliver.upton@linux.dev> wrote:
> >
> > From: Marc Zyngier <maz@kernel.org>
> >
> > Handle CPACR_EL1 accesses when running a VHE guest. In order to
> > limit the cost of the emulation, implement it as a shallow exit.
> >
> > In the other cases:
> >
> > - this is an nVHE L1 which will write to memory, and we don't trap
> >
> > - this is an L2 guest:
> >
> >   * the L1 has CPTR_EL2.TCPAC==0, and the L2 has direct register
> >     access
> >
> >   * the L1 has CPTR_EL2.TCPAC==1, and the L2 will trap, but the
> >     handling is deferred to the general handling for forwarding
> >
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> > ---
> >  arch/arm64/kvm/hyp/vhe/switch.c | 32 +++++++++++++++++++++++++++++++-
> >  1 file changed, 31 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> > index d7af5f46f22a..fed36457fef9 100644
> > --- a/arch/arm64/kvm/hyp/vhe/switch.c
> > +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> > @@ -262,10 +262,40 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
> >  	return true;
> >  }
> >
> > +static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
> > +{
> > +	u64 esr = kvm_vcpu_get_esr(vcpu);
> > +	int rt;
> > +
> > +	if (!is_hyp_ctxt(vcpu) || esr_sys64_to_sysreg(esr) != SYS_CPACR_EL1)
> > +		return false;
> > +
> > +	rt = kvm_vcpu_sys_get_rt(vcpu);
> > +
> > +	if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ) {
> > +		vcpu_set_reg(vcpu, rt, __vcpu_sys_reg(vcpu, CPTR_EL2));
> > +	} else {
> > +		vcpu_write_sys_reg(vcpu, vcpu_get_reg(vcpu, rt), CPTR_EL2);
> > +		__activate_cptr_traps(vcpu);
>
> This doesn't bisect, as this helper is only introduced in patch #10.
> You probably want to keep it towards the end of the series.

Ah, derp, I wanted to use the kvm_hyp_handle_sysreg_vhe() you introduced
for the subsequent patch. I'll just move them both.
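[Editor's note: "shallow exit" here means the trap is handled entirely in
the VHE hyp switch path and, by returning true, lets the vCPU re-enter the
guest without a round trip to the host run loop. A simplified sketch of
that dispatch contract follows; it is modelled on KVM's exit fixup path,
but the function shown is illustrative, not the kernel's.]

/*
 * Simplified, illustrative sketch of the hyp exit dispatch contract.
 * A handler returning true means the trap was fully emulated at EL2
 * and the guest is re-entered immediately (a "shallow exit"); false
 * means KVM falls back to a full exit to the host for emulation.
 */
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static bool try_shallow_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
			     const exit_handler_fn *handlers)
{
	u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
	exit_handler_fn fn = handlers[ec];

	/* No handler registered, or handler declined: take the full exit. */
	return fn && fn(vcpu, exit_code);
}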
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index d7af5f46f22a..fed36457fef9 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -262,10 +262,40 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }
 
+static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+	int rt;
+
+	if (!is_hyp_ctxt(vcpu) || esr_sys64_to_sysreg(esr) != SYS_CPACR_EL1)
+		return false;
+
+	rt = kvm_vcpu_sys_get_rt(vcpu);
+
+	if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ) {
+		vcpu_set_reg(vcpu, rt, __vcpu_sys_reg(vcpu, CPTR_EL2));
+	} else {
+		vcpu_write_sys_reg(vcpu, vcpu_get_reg(vcpu, rt), CPTR_EL2);
+		__activate_cptr_traps(vcpu);
+	}
+
+	__kvm_skip_instr(vcpu);
+
+	return true;
+}
+
+static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
+		return true;
+
+	return kvm_hyp_handle_sysreg(vcpu, exit_code);
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]		= NULL,
 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
-	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
+	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg_vhe,
 	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
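[Editor's note: the esr_sys64_to_sysreg() and kvm_vcpu_sys_get_rt() checks
above work off the ISS encoding of a trapped MSR/MRS access (EC 0x18).
Below is a standalone sketch of that decoding; field positions follow the
Arm ARM, but the struct and helper names are hypothetical, not the
kernel's.]

/*
 * Standalone sketch (not kernel code) of decoding a trapped MSR/MRS
 * access from ESR_ELx.ISS, mirroring what esr_sys64_to_sysreg() and
 * kvm_vcpu_sys_get_rt() extract.
 */
#include <stdbool.h>
#include <stdint.h>

struct sys64_trap {
	unsigned int op0, op1, crn, crm, op2;	/* sysreg encoding */
	unsigned int rt;			/* GPR operand */
	bool is_read;				/* MRS if true, MSR if false */
};

static struct sys64_trap decode_sys64_trap(uint64_t esr)
{
	/* ISS field positions for EC 0x18, per the Arm ARM. */
	return (struct sys64_trap){
		.op0	 = (esr >> 20) & 0x3,
		.op2	 = (esr >> 17) & 0x7,
		.op1	 = (esr >> 14) & 0x7,
		.crn	 = (esr >> 10) & 0xf,
		.rt	 = (esr >> 5) & 0x1f,
		.crm	 = (esr >> 1) & 0xf,
		.is_read = esr & 1,
	};
}

/*
 * CPACR_EL1 encodes as op0=3, op1=0, CRn=1, CRm=0, op2=2, which is
 * what the SYS_CPACR_EL1 comparison in the handler boils down to.
 */
static bool is_cpacr_el1(struct sys64_trap t)
{
	return t.op0 == 3 && t.op1 == 0 && t.crn == 1 &&
	       t.crm == 0 && t.op2 == 2;
}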