@@ -2078,6 +2078,11 @@
[KVM,ARM] Allow use of GICv4 for direct injection of
LPIs.
+	kvm-arm.vm_msr_trap=
+			[KVM,ARM] Trap guest writes to virtual memory related
+			system registers using HCR_EL2.TVM. Can be set to 0 to
+			turn off the erratum 219 workaround on Cavium ThunderX2.
+
kvm-intel.ept= [KVM,Intel] Disable extended page tables
(virtualized MMU) support on capable Intel chips.
Default is 1 (enabled)
@@ -42,6 +42,7 @@
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
static inline int kvm_arm_init_sve(void) { return 0; }
+static inline void kvm_arm_config_vm_msr_trap(void) { }
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
@@ -50,6 +50,8 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);
+void kvm_arm_config_vm_msr_trap(void);
+
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
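These two hunks use the usual kernel pattern for optional features: a real
declaration when the Kconfig option is enabled, and an empty static inline
stub otherwise, so call sites need no #ifdef. A minimal stand-alone sketch of
the pattern, using a made-up CONFIG_FEATURE_X in place of the real option:

    #include <stdio.h>

    /* Toggle to mimic the Kconfig option; CONFIG_FEATURE_X is made up. */
    /* #define CONFIG_FEATURE_X 1 */

    #ifdef CONFIG_FEATURE_X
    void feature_x_init(void);                   /* real version elsewhere */
    #else
    static inline void feature_x_init(void) { }  /* compiles to nothing */
    #endif

    int main(void)
    {
        feature_x_init();  /* no #ifdef needed at the call site */
        puts("init done");
        return 0;
    }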
@@ -120,10 +120,15 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
write_sysreg(val, cptr_el2);
}
+/* Key to set HCR_EL2.TVM when running a guest */
+static DEFINE_STATIC_KEY_FALSE(vm_msr_trap);
+
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 hcr = vcpu->arch.hcr_el2;
+ if (static_branch_unlikely(&vm_msr_trap))
+ hcr |= HCR_TVM;
write_sysreg(hcr, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +179,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
- if (vcpu->arch.hcr_el2 & HCR_VSE)
- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+ if (vcpu->arch.hcr_el2 & HCR_VSE) {
+ vcpu->arch.hcr_el2 &= ~HCR_VSE;
+ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+ }
if (has_vhe())
deactivate_traps_vhe();
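The reworked vSError block above merges back only the VSE bit rather than
overwriting the whole stashed HCR_EL2. That matters because __activate_traps()
may now OR HCR_TVM into the live register without updating
vcpu->arch.hcr_el2, so a blind copy-back would make the transient TVM bit
stick in the software copy. A minimal userspace sketch of the merge, with bit
positions taken from the architecture (VSE is HCR_EL2 bit 8, TVM bit 26):

    #include <stdint.h>
    #include <stdio.h>

    #define HCR_VSE (UINT64_C(1) << 8)   /* virtual SError pending */
    #define HCR_TVM (UINT64_C(1) << 26)  /* trap VM register writes */

    int main(void)
    {
        uint64_t stashed = HCR_VSE;       /* software copy, VSE pending */
        uint64_t hw = stashed | HCR_TVM;  /* entry ORed in TVM silently */

        hw &= ~HCR_VSE;  /* hardware clears VSE when the guest takes it */

        /* Merge back only VSE; "stashed = hw" would also capture TVM. */
        stashed &= ~HCR_VSE;
        stashed |= hw & HCR_VSE;

        printf("TVM leaked into the stash: %s\n",
               (stashed & HCR_TVM) ? "yes" : "no");
        return 0;
    }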
@@ -380,6 +387,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text handle_hcr_tvm(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+	/*
+	 * If HCR_EL2.TVM is already set, the normal sysreg handling
+	 * code expects to see these traps; don't shortcut them here.
+	 */
+ if (vcpu->arch.hcr_el2 & HCR_TVM)
+ return false;
+
+ switch (sysreg) {
+ case SYS_SCTLR_EL1:
+ write_sysreg_el1(val, SYS_SCTLR);
+ break;
+ case SYS_TTBR0_EL1:
+ write_sysreg_el1(val, SYS_TTBR0);
+ break;
+ case SYS_TTBR1_EL1:
+ write_sysreg_el1(val, SYS_TTBR1);
+ break;
+ case SYS_TCR_EL1:
+ write_sysreg_el1(val, SYS_TCR);
+ break;
+ case SYS_ESR_EL1:
+ write_sysreg_el1(val, SYS_ESR);
+ break;
+ case SYS_FAR_EL1:
+ write_sysreg_el1(val, SYS_FAR);
+ break;
+ case SYS_AFSR0_EL1:
+ write_sysreg_el1(val, SYS_AFSR0);
+ break;
+ case SYS_AFSR1_EL1:
+ write_sysreg_el1(val, SYS_AFSR1);
+ break;
+ case SYS_MAIR_EL1:
+ write_sysreg_el1(val, SYS_MAIR);
+ break;
+ case SYS_AMAIR_EL1:
+ write_sysreg_el1(val, SYS_AMAIR);
+ break;
+ case SYS_CONTEXTIDR_EL1:
+ write_sysreg_el1(val, SYS_CONTEXTIDR);
+ break;
+ default:
+ return false;
+ }
+
+ __kvm_skip_instr(vcpu);
+ return true;
+}
+
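handle_hcr_tvm() leans on esr_sys64_to_sysreg() and kvm_vcpu_sys_get_rt() to
recover the target system register and the source GPR from the ESR_EL2
syndrome. A hedged stand-alone sketch of that decode, with the trapped
MSR/MRS ISS layout as architected (direction in bit 0, CRm at bit 1, Rt at
bit 5, CRn at bit 10, Op1 at bit 14, Op2 at bit 17, Op0 at bit 20); the
example syndrome value is constructed by hand:

    #include <stdint.h>
    #include <stdio.h>

    /* Trapped MSR/MRS ISS fields (ESR_ELx.EC == 0x18); these mirror the
     * ESR_ELx_SYS64_ISS_* shifts in arch/arm64/include/asm/esr.h. */
    #define ISS_DIR(iss) ((iss) & 1)             /* 0 = write (MSR) */
    #define ISS_CRM(iss) (((iss) >> 1) & 0xf)
    #define ISS_RT(iss)  (((iss) >> 5) & 0x1f)
    #define ISS_CRN(iss) (((iss) >> 10) & 0xf)
    #define ISS_OP1(iss) (((iss) >> 14) & 0x7)
    #define ISS_OP2(iss) (((iss) >> 17) & 0x7)
    #define ISS_OP0(iss) (((iss) >> 20) & 0x3)

    int main(void)
    {
        /* Hand-built syndrome for "msr ttbr0_el1, x3":
         * op0=3 op1=0 CRn=2 CRm=0 op2=0, Rt=3, direction=0 (write). */
        uint32_t iss = (3u << 20) | (2u << 10) | (3u << 5);

        printf("sys s%u_%u_c%u_c%u_%u, %s x%u\n",
               ISS_OP0(iss), ISS_OP1(iss), ISS_CRN(iss), ISS_CRM(iss),
               ISS_OP2(iss), ISS_DIR(iss) ? "read to" : "write from",
               ISS_RT(iss));
        return 0;
    }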
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -399,6 +461,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
+ if (static_branch_unlikely(&vm_msr_trap) &&
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+ handle_hcr_tvm(vcpu))
+ return true;
+
/*
* We trap the first access to the FP/SIMD to save the host context
* and restore the guest context lazily.
@@ -718,3 +785,47 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
unreachable();
}
+
+/* Command line setting: -1 = off, 0 = not set, 1 = on */
+static int vm_msr_trap_arg;
+
+static int __init handle_early_vm_msr_trap_arg(char *buf)
+{
+ bool val;
+ int rv;
+
+	rv = strtobool(buf, &val);
+	/* On a parse failure, leave vm_msr_trap_arg at 0 ("not set"). */
+	if (!rv)
+		vm_msr_trap_arg = val ? 1 : -1;
+	return rv;
+}
+early_param("kvm-arm.vm_msr_trap", handle_early_vm_msr_trap_arg);
+
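The early_param handler folds the boolean command-line value into the
tri-state above, which is what later lets erratum detection tell "user said
nothing" apart from "user said off". A small userspace analogue;
my_strtobool() is a made-up stand-in covering the y/Y/1 and n/N/0 forms the
kernel's strtobool() accepts:

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal stand-in for the kernel's strtobool(): y/Y/1 and n/N/0. */
    static int my_strtobool(const char *s, bool *res)
    {
        switch (s[0]) {
        case 'y': case 'Y': case '1': *res = true;  return 0;
        case 'n': case 'N': case '0': *res = false; return 0;
        default: return -1; /* the kernel returns -EINVAL here */
        }
    }

    int main(void)
    {
        const char *args[] = { "1", "0", "bogus" };
        int i;

        for (i = 0; i < 3; i++) {
            bool val;
            /* Tri-state: stay at 0 ("not set") on a parse failure. */
            int arg = 0;

            if (!my_strtobool(args[i], &val))
                arg = val ? 1 : -1;
            printf("%-5s -> %d\n", args[i], arg);
        }
        return 0;
    }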
+void kvm_arm_config_vm_msr_trap(void)
+{
+ bool needed = false;
+
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219)) {
+ int i;
+
+		/* Needed if Aff0 of any CPU is non-zero, i.e., SMT is enabled */
+ for_each_possible_cpu(i) {
+ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) {
+ needed = true;
+ break;
+ }
+ }
+ }
+#endif
+ if (needed) {
+ if (vm_msr_trap_arg == -1)
+ pr_warn("KVM: Cavium ThunderX2 erratum 219 workaround forced off!\n");
+ else
+ vm_msr_trap_arg = 1;
+ }
+
+ if (vm_msr_trap_arg > 0) {
+ static_branch_enable(&vm_msr_trap);
+ kvm_info("Using HCR_EL2.TVM to trap guest VM updates.\n");
+ }
+}
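The SMT probe above keys off MPIDR_EL1 affinity: on ThunderX2, Aff0 numbers
the hardware threads within a core, so any CPU reporting a non-zero Aff0
means SMT is in use. A stand-alone sketch of the extraction (Aff0-Aff2 sit in
bits [23:0], Aff3 in bits [39:32]); the MPIDR values are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of MPIDR_AFFINITY_LEVEL(): 8 bits per level, Aff3 at bit 32. */
    static unsigned int mpidr_aff(uint64_t mpidr, int level)
    {
        int shift = (level == 3) ? 32 : 8 * level;
        return (mpidr >> shift) & 0xff;
    }

    int main(void)
    {
        /* Made-up MPIDRs: core 2 thread 0, core 2 thread 1. */
        uint64_t cpus[] = { 0x0200, 0x0201 };
        int i, smt = 0;

        for (i = 0; i < 2; i++)
            if (mpidr_aff(cpus[i], 0) != 0)
                smt = 1; /* a second thread exists on some core */

        printf("SMT %s -> workaround %s\n",
               smt ? "enabled" : "disabled",
               smt ? "needed" : "not needed");
        return 0;
    }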
@@ -1696,6 +1696,8 @@ int kvm_arch_init(void *opaque)
if (err)
return err;
+ kvm_arm_config_vm_msr_trap();
+
if (!in_hyp_mode) {
err = init_hyp_mode();
if (err)