@@ -607,7 +607,7 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
*/
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
- u64 cptr = vcpu_read_sys_reg(vcpu, CPTR_EL2);
+ u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);
if (!vcpu_el2_e2h_is_set(vcpu))
cptr = translate_cptr_el2_to_cpacr_el1(cptr);
@@ -67,6 +67,8 @@ static u64 __compute_hcr(struct kvm_vcpu *vcpu)
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
+ u64 cptr;
+
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
@@ -85,11 +87,23 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
+ if (!vcpu_has_nv(vcpu))
+ goto write;
+
+ /*
+ * The architecture is a bit crap (what a surprise): an EL2 guest
+ * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
+ * as they are RES0 in the guest's view. To work around it, trap the
+ * sucker using the very same bit it can't set...
+ */
+ if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
+ val |= CPTR_EL2_TCPAC;
+
/*
* Layer the guest hypervisor's trap configuration on top of our own if
* we're in a nested context.
*/
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ if (is_hyp_ctxt(vcpu))
goto write;
if (guest_hyp_fpsimd_traps_enabled(vcpu))
@@ -97,6 +111,13 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
if (guest_hyp_sve_traps_enabled(vcpu))
val &= ~CPACR_ELx_ZEN;
+ cptr = vcpu_sanitised_cptr_el2(vcpu);
+
+ if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
+ val |= cptr & CPACR_ELx_E0POE;
+
+ val |= cptr & CPTR_EL2_TCPAC;
+
write:
write_sysreg(val, cpacr_el1);
}
We need to teach KVM a couple of new tricks. CPTR_EL2 and its VHE accessor CPACR_EL1 need to be handled specially: - CPACR_EL1 is trapped on VHE so that we can track the TCPAC and TTA bits - CPTR_EL2.{TCPAC,E0POE} are propagated from L1 to L2 As a consequence of CPTR_EL2 being always trapped, we update vcpu_sanitised_cptr_el2() so that it doesn't try to read from the CPU registers, but from the shadow copy (ensuring that we always have up-to-date TCPAC and TTA bits). This helper will also be used when handling the CPTR_EL2 trap. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/include/asm/kvm_emulate.h | 2 +- arch/arm64/kvm/hyp/vhe/switch.c | 23 ++++++++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-)