The vcpu flag GUEST_HAS_SVE is per-vcpu, but it is based on what is now
a per-VM feature. Make the flag per-VM.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_emulate.h    |  6 +++---
 arch/arm64/include/asm/kvm_host.h       | 10 ++++++----
 arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +-
 arch/arm64/kvm/hyp/nvhe/hyp-main.c      |  2 +-
 arch/arm64/kvm/hyp/nvhe/pkvm.c          | 11 +++++++----
 arch/arm64/kvm/hyp/nvhe/switch.c        |  8 ++++----
 arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
 arch/arm64/kvm/reset.c                  |  2 +-
 8 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
@@ -620,7 +620,7 @@ static __always_inline void kvm_write_cptr_el2(u64 val)
}
/* Resets the value of cptr_el2 when returning to the host. */
-static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
+static __always_inline void kvm_reset_cptr_el2(struct kvm *kvm)
{
u64 val;
@@ -631,14 +631,14 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
} else if (has_hvhe()) {
val = CPACR_ELx_FPEN;
- if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+ if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
val |= CPACR_ELx_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_ELx_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
- if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
+ if (kvm_has_sve(kvm) && guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
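
With SVE exposure now a per-VM property, the host's reset value for cptr_el2
depends only on the VM and on the global guest_owns_fp_regs() state, so the
vcpu argument can be dropped. For reference, the nVHE leg reduces to a pure
function of two predicates; a minimal userspace sketch, with stand-in
constants rather than the architectural CPTR_EL2 encodings:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_CPTR_RES1 (1ull << 31) /* stand-in for CPTR_NVHE_EL2_RES1 */
#define SKETCH_CPTR_TZ   (1ull << 8)  /* stand-in for CPTR_EL2_TZ */
#define SKETCH_CPTR_TSM  (1ull << 12) /* stand-in for CPTR_EL2_TSM */

/*
 * Host cptr_el2 on nVHE: keep SVE trapped (TZ set) while the guest's
 * SVE state is live in the registers, so that host SVE use traps to
 * EL2 and the guest state can be saved before the host clobbers it.
 */
static uint64_t sketch_reset_cptr_nvhe(bool vm_has_sve, bool guest_owns_fp,
				       bool cpu_has_sme)
{
	uint64_t val = SKETCH_CPTR_RES1;

	if (vm_has_sve && guest_owns_fp)
		val |= SKETCH_CPTR_TZ;
	if (!cpu_has_sme)
		val |= SKETCH_CPTR_TSM;
	return val;
}
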
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
@@ -331,6 +331,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
+ /* SVE exposed to guest */
+#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -862,8 +864,6 @@ struct kvm_vcpu_arch {
#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
-/* SVE exposed to guest */
-#define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
/* KVM_ARM_VCPU_INIT completed */
@@ -956,8 +956,10 @@ struct kvm_vcpu_arch {
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
-#define vcpu_has_sve(vcpu) (system_supports_sve() && \
- vcpu_get_flag(vcpu, GUEST_HAS_SVE))
+#define kvm_has_sve(kvm) \
+ test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags)
+#define vcpu_has_sve(vcpu) \
+ kvm_has_sve((vcpu)->kvm)
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
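
The new predicate is a plain test_bit() on the kvm_arch flags word, usable
from any context that has a struct kvm, whether or not a vCPU is at hand;
vcpu_has_sve() just forwards to it. A small userspace sketch of the pattern
(names are illustrative, and plain bit operations stand in for the kernel's
set_bit()/test_bit() atomics):

#include <stdbool.h>

#define SKETCH_FLAG_GUEST_HAS_SVE 9

struct sketch_arch { unsigned long flags; };
struct sketch_kvm  { struct sketch_arch arch; };
struct sketch_vcpu { struct sketch_kvm *kvm; };

static void sketch_enable_sve(struct sketch_kvm *kvm)
{
	kvm->arch.flags |= 1ul << SKETCH_FLAG_GUEST_HAS_SVE; /* set_bit() */
}

static bool sketch_kvm_has_sve(struct sketch_kvm *kvm)
{
	return kvm->arch.flags & (1ul << SKETCH_FLAG_GUEST_HAS_SVE); /* test_bit() */
}

static bool sketch_vcpu_has_sve(struct sketch_vcpu *vcpu)
{
	return sketch_kvm_has_sve(vcpu->kvm); /* forwards to the VM-wide check */
}

Note that the old vcpu_has_sve() also tested system_supports_sve(); that
check is dropped here, presumably because the flag is only ever set on hosts
that support SVE.
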
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -391,7 +391,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
if (!system_supports_fpsimd())
return false;
- sve_guest = vcpu_has_sve(vcpu);
+ sve_guest = kvm_has_sve(kern_hyp_va(vcpu->kvm));
esr_ec = kvm_vcpu_trap_get_class(vcpu);
/* Only handle traps the vCPU can support here: */
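
vcpu->kvm holds a kernel linear-map address, which the nVHE hypervisor cannot
dereference from EL2; every hyp-side use therefore goes through kern_hyp_va().
Conceptually the conversion is an offset fixup between the two linear maps,
roughly as below (a sketch with a hypothetical sketch_va_offset; the real
kern_hyp_va() also applies masking and a randomised tag):

/* Hypothetical offset between the kernel and hyp linear maps. */
static unsigned long sketch_va_offset;

static inline void *sketch_kern_hyp_va(void *kern_va)
{
	return (void *)((unsigned long)kern_va - sketch_va_offset);
}

VHE code (see the vhe/switch.c hunk below) passes vcpu->kvm through
untranslated, since a VHE hypervisor runs in the kernel's own address space.
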
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -71,7 +71,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
isb();
- if (vcpu_has_sve(vcpu))
+ if (kvm_has_sve(kern_hyp_va(vcpu->kvm)))
__hyp_sve_save_guest(vcpu);
else
__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -248,10 +248,13 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
struct kvm *kvm = &hyp_vm->kvm;
+ unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
/* No restrictions for non-protected VMs. */
if (!kvm_vm_is_protected(kvm)) {
+ hyp_vm->kvm.arch.flags = host_arch_flags;
+
bitmap_copy(kvm->arch.vcpu_features,
host_kvm->arch.vcpu_features,
KVM_VCPU_MAX_FEATURES);
@@ -271,8 +274,10 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
- if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE))
+ if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
set_bit(KVM_ARM_VCPU_SVE, allowed_features);
+ kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
+ }
bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
allowed_features, KVM_VCPU_MAX_FEATURES);
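
For protected VMs only the SVE bit of the host's flags word is let through,
and the word is snapshotted once with READ_ONCE() so the protected and
non-protected paths cannot observe different host values. A userspace sketch
of the filtering (illustrative names; BIT(n) is the kernel's 1UL << n):

#include <stdbool.h>

#define SKETCH_BIT(n)             (1ul << (n))
#define SKETCH_FLAG_GUEST_HAS_SVE 9

static void sketch_init_flags_from_host(unsigned long *vm_flags,
					unsigned long host_flags,
					bool is_protected, bool sve_allowed)
{
	if (!is_protected) {
		/* Non-protected VMs mirror the host's view wholesale. */
		*vm_flags = host_flags;
		return;
	}

	/* Protected VMs inherit only bits that are explicitly allowed. */
	if (sve_allowed)
		*vm_flags |= host_flags & SKETCH_BIT(SKETCH_FLAG_GUEST_HAS_SVE);
}
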
@@ -308,10 +313,8 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
{
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
- if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
- vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
+ if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
- }
}
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -44,7 +44,7 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
if (guest_owns_fp_regs()) {
val |= CPACR_ELx_FPEN;
- if (vcpu_has_sve(vcpu))
+ if (kvm_has_sve(kern_hyp_va(vcpu->kvm)))
val |= CPACR_ELx_ZEN;
}
} else {
@@ -56,7 +56,7 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
*/
val |= CPTR_EL2_TSM;
- if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+ if (!kvm_has_sve(kern_hyp_va(vcpu->kvm)) || !guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
if (!guest_owns_fp_regs())
@@ -119,7 +119,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
- kvm_reset_cptr_el2(vcpu);
+ kvm_reset_cptr_el2(kern_hyp_va(vcpu->kvm));
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
@@ -203,7 +203,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
__hyp_sve_save_host();
/* Re-enable SVE traps if not supported for the guest vcpu. */
- if (!vcpu_has_sve(vcpu))
+ if (!kvm_has_sve(kern_hyp_va(vcpu->kvm)))
cpacr_clear_set(CPACR_ELx_ZEN, 0);
} else {
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -207,7 +207,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
- kvm_reset_cptr_el2(vcpu);
+ kvm_reset_cptr_el2(vcpu->kvm);
if (!arm64_kernel_unmapped_at_el0())
host_vectors = __this_cpu_read(this_cpu_vector);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
@@ -85,7 +85,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
* KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
* kvm_arm_vcpu_finalize(), which freezes the configuration.
*/
- vcpu_set_flag(vcpu, GUEST_HAS_SVE);
+ set_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &vcpu->kvm->arch.flags);
}
/*
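
From userspace nothing changes: requesting KVM_ARM_VCPU_SVE at vCPU init now
sets the VM-wide flag, and the vector-length configuration is still finalized
per vCPU. A sketch of the usual calling sequence on an arm64 host, error
handling elided:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void sketch_init_sve_vcpu(int vcpu_fd, struct kvm_vcpu_init *init)
{
	int what = KVM_ARM_VCPU_SVE;

	init->features[0] |= 1u << KVM_ARM_VCPU_SVE;
	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init);	/* sets GUEST_HAS_SVE VM-wide */

	/* ... optionally set KVM_REG_ARM64_SVE_VLS via KVM_SET_ONE_REG ... */

	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &what);	/* freezes the SVE config */
}

Since set_bit() is atomic, concurrent KVM_ARM_VCPU_INIT calls on different
vCPUs race harmlessly on the shared flags word.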