--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
u64 hcr_el2;
u64 hcrx_el2;
u64 mdcr_el2;
- u64 cptr_el2;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1546,7 +1546,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
- vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
/*
* Handle the "start in power-off" case.
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -83,44 +83,6 @@ static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 = val;
}
-static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- u64 val = vcpu->arch.cptr_el2;
-
- if (!has_hvhe()) {
- val |= CPTR_NVHE_EL2_RES1;
- val &= ~(CPTR_NVHE_EL2_RES0);
- }
-
- if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
- val |= CPTR_EL2_TAM;
-
- /* SVE can be disabled by userspace even if supported. */
- if (!vcpu_has_sve(vcpu)) {
- if (has_hvhe())
- val &= ~(CPACR_ELx_ZEN);
- else
- val |= CPTR_EL2_TZ;
- }
-
- /* No SME support in KVM. */
- BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
- if (has_hvhe())
- val &= ~(CPACR_ELx_SMEN);
- else
- val |= CPTR_EL2_TSM;
-
- if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
- if (has_hvhe())
- val |= CPACR_EL1_TTA;
- else
- val |= CPTR_EL2_TTA;
- }
-
- vcpu->arch.cptr_el2 = val;
-}
-
static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
@@ -191,7 +153,6 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
int ret;
- vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
vcpu->arch.mdcr_el2 = 0;
pkvm_vcpu_reset_hcr(vcpu);
@@ -204,7 +165,6 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
return ret;
pvm_init_traps_hcr(vcpu);
- pvm_init_traps_cptr(vcpu);
pvm_init_traps_mdcr(vcpu);
return 0;
@@ -644,8 +604,6 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
return ret;
}
- hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
-
return 0;
}
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -35,14 +35,10 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
-static void __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
-
- ___activate_traps(vcpu, vcpu->arch.hcr_el2);
- __activate_traps_common(vcpu);
+ u64 val = kvm_get_reset_cptr_el2(vcpu);
- val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
if (cpus_have_final_cap(ARM64_SME)) {
@@ -61,7 +57,29 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
+ if (vcpu_is_protected(vcpu)) {
+ struct kvm *kvm = vcpu->kvm;
+
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
+ val |= CPTR_EL2_TAM;
+
+ if (!vcpu_has_sve(vcpu)) {
+ if (has_hvhe())
+ val &= ~CPACR_ELx_ZEN;
+ else
+ val |= CPTR_EL2_TZ;
+ }
+ }
+
kvm_write_cptr_el2(val);
+}
+
+static void __activate_traps(struct kvm_vcpu *vcpu)
+{
+ ___activate_traps(vcpu, vcpu->arch.hcr_el2);
+ __activate_traps_common(vcpu);
+ __activate_cptr_traps(vcpu);
+
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
Similar to VHE, calculate the value of cptr_el2 from scratch when
activating traps. This removes the need to store cptr_el2 in every vcpu
structure. Moreover, some traps, such as those that depend on whether
the guest owns the fp registers, need to be set on every vcpu run.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  1 -
 arch/arm64/kvm/arm.c              |  1 -
 arch/arm64/kvm/hyp/nvhe/pkvm.c    | 42 -------------------------------
 arch/arm64/kvm/hyp/nvhe/switch.c  | 30 +++++++++++++++++-----
 4 files changed, 24 insertions(+), 50 deletions(-)
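
A note for reviewers, in case the pattern is easier to see in isolation:
the sketch below shows the "recompute the trap word from scratch on every
run" idea in a standalone form. The names (TRAP_*, struct vcpu,
compute_cptr_traps()) are illustrative stand-ins, not the real KVM
definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative trap bits and vcpu state; not the real KVM definitions. */
#define TRAP_FP		(UINT64_C(1) << 0)
#define TRAP_SVE	(UINT64_C(1) << 1)
#define TRAP_AMU	(UINT64_C(1) << 2)

struct vcpu {
	bool has_sve;		/* fixed at vcpu init */
	bool has_amu;		/* fixed at vcpu init */
	bool owns_fp_regs;	/* can change between runs */
};

/*
 * Recompute the trap word from a reset baseline every time traps are
 * activated; nothing is cached in the vcpu structure.
 */
static uint64_t compute_cptr_traps(const struct vcpu *vcpu)
{
	uint64_t val = 0;

	if (!vcpu->has_amu)
		val |= TRAP_AMU;
	if (!vcpu->has_sve)
		val |= TRAP_SVE;
	if (!vcpu->owns_fp_regs)
		val |= TRAP_FP;	/* per-run state, so it must be rechecked here */

	return val;
}

int main(void)
{
	struct vcpu v = { .has_sve = false, .has_amu = true, .owns_fp_regs = false };

	printf("traps = %#llx\n", (unsigned long long)compute_cptr_traps(&v));

	/* FP ownership changed since the last run; the next run picks it up. */
	v.owns_fp_regs = true;
	printf("traps = %#llx\n", (unsigned long long)compute_cptr_traps(&v));

	return 0;
}

Since fp-register ownership can change between runs, a value cached in the
vcpu would go stale; deriving the trap configuration at trap-activation
time keeps it correct without storing cptr_el2 anywhere.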