@@ -155,6 +155,16 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
return vcpu->arch.fault.esr_el2;
}
+static inline u64 kvm_vcpu_get_vsesr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.vsesr_el2;
+}
+
+static inline void kvm_vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.fault.vsesr_el2 = val;
+}
+
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
@@ -88,6 +88,7 @@ struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
u64 far_el2; /* Hyp Fault Address Register */
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
+	u64 vsesr_el2;		/* Virtual SError Exception Syndrome Register */
};
/*
@@ -86,6 +86,9 @@
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+/* Virtual SError Exception Syndrome Register (ARMv8.2 RAS Extension) */
+#define REG_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
+
#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
(!!x)<<8 | 0x1f)
#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
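For reference, REG_VSESR_EL2 packs op0/op1/CRn/CRm/op2 into the S3_4_C5_C2_3
encoding, so the register can be reached through the generic _s accessors even
when the assembler does not know VSESR_EL2 by name. A minimal sketch
(illustrative only, not part of this patch):

	u64 vsesr;

	/* Access by encoding; no ARMv8.2-aware assembler required */
	vsesr = read_sysreg_s(REG_VSESR_EL2);
	write_sysreg_s(vsesr, REG_VSESR_EL2);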
@@ -57,7 +57,6 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
})
int handle_guest_sea(phys_addr_t addr, unsigned int esr);
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SYSTEM_MISC_H */
@@ -279,7 +279,16 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int kvm_arm_set_sei_esr(struct kvm_vcpu *vcpu, u64 *syndrome)
{
- return -EINVAL;
+ u64 reg = *syndrome;
+
+	/* Inject the virtual SError (asynchronous abort) into the guest */
+ kvm_inject_vabt(vcpu);
+
+ if (reg)
+		/* Set VSESR_EL2[24:0] to the syndrome user space specified */
+ kvm_vcpu_set_vsesr(vcpu, reg & ESR_ELx_ISS_MASK);
+
+ return 0;
}
int __attribute_const__ kvm_target_cpu(void)
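To show the intended use from the other side of the ABI, a hypothetical
user-space caller is sketched below. KVM_ARM_SET_SERROR_ESR is an assumed
placeholder name; only the syndrome layout (ISS in bits [24:0]) comes from
this patch:

	/* Hypothetical ioctl name; the actual plumbing is not defined here */
	static int set_serror_syndrome(int vcpu_fd)
	{
		uint64_t syndrome = 1ULL << 24;	/* IDS = 1, ISS[23:0] = 0 */

		return ioctl(vcpu_fd, KVM_ARM_SET_SERROR_ESR, &syndrome);
	}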
@@ -67,6 +67,13 @@ static hyp_alternate_select(__activate_traps_arch,
__activate_traps_nvhe, __activate_traps_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __sysreg_set_vsesr(struct kvm_vcpu *vcpu)
+{
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) &&
+ (vcpu->arch.hcr_el2 & HCR_VSE))
+ write_sysreg_s(vcpu->arch.fault.vsesr_el2, REG_VSESR_EL2);
+}
+
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@@ -86,6 +94,13 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
isb();
}
write_sysreg(val, hcr_el2);
+
+ /*
+ * If the virtual SError interrupt is taken to EL1 using AArch64,
+ * then VSESR_EL2 provides the syndrome value reported in ESR_EL1.
+ */
+ __sysreg_set_vsesr(vcpu);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
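To make the comment above concrete: once HCR_EL2.VSE is set, the guest takes
the virtual SError when it unmasks SErrors, and ESR_EL1 then reports the ISS
that was programmed into VSESR_EL2. A guest-side sketch, purely illustrative
(the handler name, the vector wiring and the read_sysreg helper are
assumptions):

	/* Guest EL1 SError handler (sketch) */
	void serror_handler(void)
	{
		u64 esr = read_sysreg(esr_el1);

		/* For SErrors, bit 24 is IDS: imp-def syndrome from VSESR_EL2 */
		if (esr & ESR_ELx_ISV)
			handle_impdef_serror(esr);	/* hypothetical helper */
	}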
@@ -232,14 +232,25 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
inject_undef64(vcpu);
}
+static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+	kvm_vcpu_set_vsesr(vcpu, esr);
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+}
+
/**
* kvm_inject_vabt - inject an async abort / SError into the guest
* @vcpu: The VCPU to receive the exception
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
+ *
+ * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
+ * the remaining ISS all-zeros so that this error is not interpreted as an
+ * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
+ * value, so the CPU generates an imp-def value.
*/
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
- vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+ pend_guest_serror(vcpu, ESR_ELx_ISV);
}