Instead of applying the NV idreg limits at run time, switch to doing it
at the same time as the rest of the VM initialisation. This will make
things much simpler once we introduce vcpu-driven variants of NV.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_nested.h |  1 +
 arch/arm64/kvm/nested.c             | 45 +----------------------------
 arch/arm64/kvm/sys_regs.c           |  3 ++
 3 files changed, 5 insertions(+), 44 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -188,6 +188,7 @@ static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
}

int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -804,7 +804,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
* This list should get updated as new features get added to the NV
* support, and new extension to the architecture.
*/
-static u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
{
switch (reg) {
case SYS_ID_AA64ISAR0_EL1:
@@ -929,47 +929,6 @@ static u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
return val;
}

-static void limit_nv_id_regs(struct kvm *kvm)
-{
- u64 val;
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
- val = limit_nv_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
-}
-
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
enum vcpu_sysreg sr, u64 v)
{
@@ -1014,8 +973,6 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
if (!kvm->arch.sysreg_masks)
return -ENOMEM;

- limit_nv_id_regs(kvm);
-
/* VTTBR_EL2 */
res0 = res1 = 0;
if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1638,6 +1638,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
break;
}

+ if (vcpu_has_nv(vcpu))
+ val = limit_nv_id_reg(vcpu->kvm, id, val);
+
return val;
}

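To illustrate the effect of the change, here is a small, self-contained
C sketch of the resulting flow. It is a toy model, not kernel code: the
register ID, the field being capped, the initial value, and all toy_*
names are invented stand-ins. What it demonstrates is the shape of the
patch: the per-register NV cap is applied in the sanitised-read path,
so the capped value is what gets stored when the VM's ID registers are
reset, rather than the stored values being rewritten by a separate
limit_nv_id_regs() pass afterwards.

/*
 * Toy model of the new flow; all names and values below are invented
 * for illustration and do not exist in the kernel.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_ID_AA64ISAR0	0u

/* Stand-in for limit_nv_id_reg(): mask out features NV can't support. */
static uint64_t toy_limit_nv_id_reg(uint32_t reg, uint64_t val)
{
	switch (reg) {
	case TOY_ID_AA64ISAR0:
		val &= ~(uint64_t)0xf000; /* pretend bits [15:12] are unsupported */
		break;
	}
	return val;
}

/*
 * Stand-in for __kvm_read_sanitised_id_reg(): with the patch, the NV
 * cap is applied here, and since idreg reset goes through this path,
 * an NV guest never observes the uncapped value.
 */
static uint64_t toy_read_sanitised_id_reg(bool has_nv, uint32_t reg)
{
	uint64_t val = 0xabcd;	/* pretend host-sanitised value */

	if (has_nv)
		val = toy_limit_nv_id_reg(reg, val);

	return val;
}

int main(void)
{
	printf("plain VM: %#llx\n",
	       (unsigned long long)toy_read_sanitised_id_reg(false, TOY_ID_AA64ISAR0));
	printf("NV VM:    %#llx\n",
	       (unsigned long long)toy_read_sanitised_id_reg(true, TOY_ID_AA64ISAR0));
	return 0;
}

The design point is that the NV limits become a property of how an ID
register value is produced, rather than a one-shot fixup of stored
state, which is presumably what makes the vcpu-driven NV variants
mentioned above simpler to introduce.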