@@ -11,6 +11,7 @@
#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__
+#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/debug-monitors.h>
@@ -599,4 +600,46 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
kvm_write_cptr_el2(val);
}
+
+/*
+ * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
+ * format if E2H isn't set.
+ */
+static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
+{
+ u64 cptr = vcpu_read_sys_reg(vcpu, CPTR_EL2);
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ cptr = translate_cptr_el2_to_cpacr_el1(cptr);
+
+ return cptr;
+}
+
+static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
+ unsigned int xen)
+{
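+ /*
+  * CPACR_ELx.{FPEN,ZEN} encodings: 0b00 and 0b10 trap accesses from
+  * EL0 and EL1/EL2, 0b11 disables the trap entirely, and 0b01 only
+  * traps EL0 (see below).
+  */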
+ switch (xen) {
+ case 0b00:
+ case 0b10:
+ return true;
+ case 0b01:
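+ /*
+  * 0b01 only traps EL0. With vHCR_EL2.TGE set there is no vEL1,
+  * so anything running below vEL2 is effectively the guest
+  * hypervisor's EL0 and the trap applies.
+  */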
+ return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
+ case 0b11:
+ default:
+ return false;
+ }
+}
+
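+/*
+ * This has to be a macro so that SYS_FIELD_GET() can paste the bare field
+ * name (e.g. FPEN) into the CPACR_ELx field definitions. Guests without
+ * nested virt have no vEL2 to trap to, so this always evaluates to false
+ * for them.
+ */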
+#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen) \
+ (!vcpu_has_nv(vcpu) ? false : \
+ ____cptr_xen_trap_enabled(vcpu, \
+ SYS_FIELD_GET(CPACR_ELx, xen, \
+ vcpu_sanitised_cptr_el2(vcpu))))
+
+static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+ return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
@@ -75,5 +75,4 @@ static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
return false;
}
#endif
-
#endif /* __ARM64_KVM_NESTED_H */
@@ -94,11 +94,19 @@ static int handle_smc(struct kvm_vcpu *vcpu)
}
/*
- * Guest access to FP/ASIMD registers are routed to this handler only
- * when the system doesn't support FP/ASIMD.
+ * This handles the cases where the system does not support FP/ASIMD or when
+ * we are running nested virtualization and the guest hypervisor is trapping
+ * FP/ASIMD accesses by its own guest.
+ *
+ * All other handling of guest vs. host FP/ASIMD register state is handled in
+ * fixup_guest_exit().
*/
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
{
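+ /* Forward the exit to vEL2 if the guest hypervisor is trapping FP/ASIMD. */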
+ if (guest_hyp_fpsimd_traps_enabled(vcpu))
+ return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ /* This is the case when the system doesn't support FP/ASIMD. */
kvm_inject_undefined(vcpu);
return 1;
}
@@ -304,7 +312,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_BREAKPT_LOW] = kvm_handle_guest_debug,
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
- [ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
+ [ESR_ELx_EC_FP_ASIMD] = kvm_handle_fpasimd,
[ESR_ELx_EC_PAC] = kvm_handle_ptrauth,
};
@@ -341,6 +341,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
/* Only handle traps the vCPU can support here: */
switch (esr_ec) {
case ESR_ELx_EC_FP_ASIMD:
+ /*
+  * Forward traps to the guest hypervisor as required: returning
+  * false hands the exit back to the host, where kvm_handle_fpasimd()
+  * injects it into the virtual EL2.
+  */
+ if (guest_hyp_fpsimd_traps_enabled(vcpu))
+ return false;
break;
case ESR_ELx_EC_SVE:
if (!sve_guest)