@@ -244,6 +244,8 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+int kvm_vcpu_ioctl_sei(struct kvm_vcpu *vcpu, u64 *syndrome);
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -248,6 +248,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+/*
+ * SEI injection with a user-specified syndrome is only supported on
+ * arm64; it is not supported on arm32.
+ */
+int kvm_vcpu_ioctl_sei(struct kvm_vcpu *vcpu, u64 *syndrome)
+{
+ return -EINVAL;
+}
+
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
@@ -77,6 +77,7 @@
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
+
#define ESR_ELx_IL (UL(1) << 25)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
@@ -95,6 +96,7 @@
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
+#define ESR_ELx_FSC_SERROR (0x11)
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV (UL(1) << 24)
@@ -107,6 +109,15 @@
#define ESR_ELx_AR (UL(1) << 14)
#define ESR_ELx_CM (UL(1) << 8)
+/* ISS field definitions for SError interrupt */
+#define ESR_ELx_AET_SHIFT (10)
+#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT) /* Uncontainable */
+#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT) /* Uncorrected Unrecoverable */
+#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT) /* Uncorrected Restartable */
+#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT) /* Uncorrected Recoverable */
+#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT) /* Corrected */
+
/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
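Not part of the patch: a minimal sketch, assuming the esr.h definitions above, of how the new AET and DFSC fields combine when classifying a RAS SError syndrome. The helper name is hypothetical and mirrors the policy used later in kvm_handle_guest_sei().

/* Hypothetical helper, for illustration only. */
static bool serror_syndrome_is_recoverable(u32 esr)
{
	/*
	 * AET is only meaningful when the syndrome is architecturally
	 * defined (bit 24, IDS, is clear) and DFSC reports an SError.
	 */
	if ((esr & ESR_ELx_ISV) || (esr & ESR_ELx_FSC) != ESR_ELx_FSC_SERROR)
		return false;

	switch (esr & ESR_ELx_AET) {
	case ESR_ELx_AET_CE:	/* corrected */
	case ESR_ELx_AET_UEO:	/* restartable */
	case ESR_ELx_AET_UER:	/* recoverable */
		return true;
	default:		/* UC or UEU: not recoverable here */
		return false;
	}
}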
@@ -155,6 +155,16 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
return vcpu->arch.fault.esr_el2;
}
+static inline u32 kvm_vcpu_get_vsesr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.vsesr_el2;
+}
+
+static inline void kvm_vcpu_set_vsesr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ vcpu->arch.fault.vsesr_el2 = val;
+}
+
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
@@ -88,6 +88,7 @@ struct kvm_vcpu_fault_info {
u32 esr_el2; /* Hyp Syndrom Register */
u64 far_el2; /* Hyp Fault Address Register */
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
+ u32 vsesr_el2; /* Virtual SError Exception Syndrome Register */
};
/*
@@ -381,6 +382,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
+int kvm_vcpu_ioctl_sei(struct kvm_vcpu *vcpu, u64 *syndrome);
static inline void __cpu_init_stage2(void)
{
@@ -86,6 +86,9 @@
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+/* Virtual SError exception syndrome register, added by the ARMv8.2 RAS extension */
+#define REG_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
+
#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
(!!x)<<8 | 0x1f)
#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
@@ -57,6 +57,7 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
})
int handle_guest_sea(phys_addr_t addr, unsigned int esr);
+int handle_guest_sei(unsigned int esr);
#endif /* __ASSEMBLY__ */
@@ -313,6 +313,20 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+int kvm_vcpu_ioctl_sei(struct kvm_vcpu *vcpu, u64 *syndrome)
+{
+ u64 reg = *syndrome;
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && reg)
+ /* Set VSESR_EL2[24:0] to the value specified by user space */
+ kvm_vcpu_set_vsesr(vcpu, reg & ESR_ELx_ISS_MASK);
+
+ /* Inject a virtual SError (asynchronous abort) into the guest */
+ kvm_inject_vabt(vcpu);
+
+ return 0;
+}
+
int __attribute_const__ kvm_target_cpu(void)
{
unsigned long implementor = read_cpuid_implementor();
@@ -28,6 +28,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
+#include <asm/system_misc.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -178,8 +179,55 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
return arm_exit_handlers[hsr_ec];
}
+/**
+ * kvm_handle_guest_sei - handle a guest SError interrupt (asynchronous abort)
+ * @vcpu: the VCPU pointer
+ *
+ * For a RAS SError interrupt, if the severity (AET) is ESR_ELx_AET_UEU and
+ * the error was taken from guest user space, let host user space attempt
+ * recovery; otherwise inject a virtual SError into the guest or panic.
+ */
+static int kvm_handle_guest_sei(struct kvm_vcpu *vcpu)
+{
+ unsigned int esr = kvm_vcpu_get_hsr(vcpu);
+ bool impdef_syndrome = esr & ESR_ELx_ISV; /* aka IDS */
+ unsigned int aet = esr & ESR_ELx_AET;
+
+ /*
+ * Inject a virtual SError directly in any of these three cases:
+ * 1. The CPU does not support the RAS extension;
+ * 2. The syndrome is IMPLEMENTATION DEFINED (IDS is set);
+ * 3. The DFSC field is not ESR_ELx_FSC_SERROR, in which case AET is RES0.
+ */
+ if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN) || impdef_syndrome ||
+ ((esr & ESR_ELx_FSC) != ESR_ELx_FSC_SERROR)) {
+ kvm_inject_vabt(vcpu);
+ return 1;
+ }
+
+ switch (aet) {
+ case ESR_ELx_AET_CE: /* corrected error */
+ case ESR_ELx_AET_UEO: /* restartable error, not yet consumed */
+ return 0; /* continue processing the guest exit */
+ case ESR_ELx_AET_UEU: /* The error has not been propagated */
+ /*
+ * Only handle the guest user mode SEI if the error has not been propagated
+ */
+ if (!vcpu_mode_priv(vcpu) && !handle_guest_sei(esr))
+ return 1;
+
+ /* If the SError could not be handled, fall through and panic */
+ default:
+ /*
+ * At this point the CPU supports RAS, but the SError is either fatal
+ * or host user space failed to handle it.
+ */
+ panic("This Asynchronous SError interrupt is dangerous, panic");
+ }
+}
+
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
*/
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -201,8 +249,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
*vcpu_pc(vcpu) -= adj;
}
- kvm_inject_vabt(vcpu);
- return 1;
+ return kvm_handle_guest_sei(vcpu);
}
exception_index = ARM_EXCEPTION_CODE(exception_index);
@@ -211,8 +258,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_EL1_SERROR:
- kvm_inject_vabt(vcpu);
- return 1;
+ return kvm_handle_guest_sei(vcpu);
case ARM_EXCEPTION_TRAP:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
@@ -42,6 +42,13 @@ bool __hyp_text __fpsimd_enabled(void)
return __fpsimd_is_enabled()();
}
+static void __hyp_text __sysreg_set_vsesr(struct kvm_vcpu *vcpu)
+{
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) &&
+ (vcpu->arch.hcr_el2 & HCR_VSE))
+ write_sysreg_s(vcpu->arch.fault.vsesr_el2, REG_VSESR_EL2);
+}
+
static void __hyp_text __activate_traps_vhe(void)
{
u64 val;
@@ -86,6 +93,13 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
isb();
}
write_sysreg(val, hcr_el2);
+
+ /*
+ * If the virtual SError interrupt is taken to EL1 using AArch64,
+ * then VSESR_EL2 provides the syndrome value reported in ESR_EL1.
+ */
+ __sysreg_set_vsesr(vcpu);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
@@ -687,6 +687,40 @@ int handle_guest_sea(phys_addr_t addr, unsigned int esr)
return ret;
}
+/*
+ * Handle an SError interrupt that occurred while a guest was running.
+ *
+ * The return value will be zero if the SEI was successfully handled
+ * and non-zero if handling failed.
+ */
+int handle_guest_sei(unsigned int esr)
+{
+ int ret = 0;
+ struct siginfo info;
+
+ pr_alert("Asynchronous SError interrupt detected on CPU: %d, esr: %x\n",
+ smp_processor_id(), esr);
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_MCEERR_AR;
+ /*
+ * The faulting address is not accurate for asynchronous aborts, so
+ * report a NULL fault address.
+ */
+ info.si_addr = NULL;
+
+ ret = force_sig_info(SIGBUS, &info, current);
+ if (ret < 0)
+ pr_info("Handle guest SEI: Error sending signal to %s:%d: %d\n",
+ current->comm, current->pid, ret);
+ else
+ ret = 0;
+
+ return ret;
+}
+
/*
* Dispatch a data abort to the relevant handler.
*/
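Not part of the patch: a minimal user-space sketch of how a VMM might receive the BUS_MCEERR_AR signal raised by handle_guest_sei() above. The handler, function names and recovery policy are illustrative only.

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Illustrative only: the VMM installs a SIGBUS handler so that the
 * BUS_MCEERR_AR signal sent by handle_guest_sei() reaches its own
 * recovery path (for example, injecting a virtual SError with the
 * KVM_ARM_SEI ioctl added later in this series).
 */
static void vmm_sigbus_handler(int sig, siginfo_t *info, void *ctx)
{
	if (info->si_code == BUS_MCEERR_AR) {
		static const char msg[] = "guest SError notification received\n";

		write(STDERR_FILENO, msg, sizeof(msg) - 1);
		return;
	}
	abort();	/* anything else is unexpected */
}

static void vmm_install_sigbus_handler(void)
{
	struct sigaction act = {
		.sa_sigaction	= vmm_sigbus_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGBUS, &act, NULL);
}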
@@ -1356,6 +1356,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
+#define KVM_ARM_SEI _IO(KVMIO, 0xba)
+
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
@@ -1022,6 +1022,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
return -EFAULT;
return kvm_arm_vcpu_has_attr(vcpu, &attr);
}
+ case KVM_ARM_SEI: {
+ u64 syndrome;
+
+ if (copy_from_user(&syndrome, argp, sizeof(syndrome)))
+ return -EFAULT;
+ return kvm_vcpu_ioctl_sei(vcpu, &syndrome);
+ }
default:
return -EINVAL;
}
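Not part of the patch: a minimal sketch of how user space could drive the new ioctl, assuming a vCPU file descriptor and the KVM_ARM_SEI definition added above. The syndrome value is illustrative; only bits [24:0] are used by the kernel.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative only: inject a virtual SError into a vCPU via KVM_ARM_SEI.
 * A non-zero syndrome populates VSESR_EL2[24:0] when the CPU implements
 * the RAS extension; with a zero syndrome the SError is injected without
 * updating VSESR_EL2.
 */
static int inject_guest_serror(int vcpu_fd, uint64_t syndrome)
{
	return ioctl(vcpu_fd, KVM_ARM_SEI, &syndrome);
}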