--- a/arch/arm/include/asm/kvm_nested.h
+++ b/arch/arm/include/asm/kvm_nested.h
@@ -5,6 +5,7 @@
#include <linux/kvm_host.h>
static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu) { return false; }
+static inline bool enhanced_nested_virt_in_use(const struct kvm_vcpu *vcpu) { return false; }
static inline void check_nested_vcpu_requests(struct kvm_vcpu *vcpu) {}
#endif /* __ARM_KVM_NESTED_H */
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -60,7 +60,8 @@
#define ARM64_HAS_RNG 50
#define ARM64_HAS_NESTED_VIRT 51
#define ARM64_HAS_ARMv8_4_TTL 52
+#define ARM64_HAS_ENHANCED_NESTED_VIRT 53

-#define ARM64_NCAPS 53
+#define ARM64_NCAPS 54
#endif /* __ASM_CPUCAPS_H */
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -11,6 +11,12 @@ static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu)
test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features);
}
+static inline bool enhanced_nested_virt_in_use(const struct kvm_vcpu *vcpu)
+{
+ return cpus_have_const_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
+ nested_virt_in_use(vcpu);
+}
+
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1454,6 +1454,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64MMFR2_NV_SHIFT,
.min_field_value = 1,
},
+ {
+ .desc = "Enhanced Nested Virtualization Support",
+ .capability = ARM64_HAS_ENHANCED_NESTED_VIRT,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_nested_virt_support,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_NV_SHIFT,
+ .min_field_value = 2,
+ },
#endif /* CONFIG_ARM64_VHE */
{
.desc = "32-bit EL0 Support",
Add the detection code for the ARMv8.4-NV feature, keyed off
ID_AA64MMFR2_EL1.NV >= 2, together with an enhanced_nested_virt_in_use()
helper so that the rest of KVM can check for it on a per-vcpu basis.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm/include/asm/kvm_nested.h   |  1 +
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/kvm_nested.h |  6 ++++++
 arch/arm64/kernel/cpufeature.c      | 10 ++++++++++
 4 files changed, 19 insertions(+), 1 deletion(-)
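For context, and not part of the patch itself: ID_AA64MMFR2_EL1.NV reads as 1
on CPUs implementing ARMv8.3-NV and as 2 on CPUs implementing ARMv8.4-NV,
which is why the new arm64_features[] entry reuses has_nested_virt_support()
and the same register field and only raises .min_field_value to 2; hardware
reporting NV == 2 therefore sets both ARM64_HAS_NESTED_VIRT and
ARM64_HAS_ENHANCED_NESTED_VIRT. The sketch below is purely illustrative
(kvm_nv_flavour() is a made-up name, not something this series adds) and
shows how a caller might combine the two predicates:

/*
 * Illustration only, not part of this patch: a hypothetical helper
 * showing how the two predicates compose for a given vcpu.
 */
static int kvm_nv_flavour(const struct kvm_vcpu *vcpu)
{
	/*
	 * enhanced_nested_virt_in_use() already implies
	 * nested_virt_in_use(), so test the more capable mode first.
	 */
	if (enhanced_nested_virt_in_use(vcpu))
		return 2;	/* ARMv8.4-NV */
	if (nested_virt_in_use(vcpu))
		return 1;	/* ARMv8.3-NV */
	return 0;		/* no nested virt for this vcpu */
}

Both predicates reduce to a cpus_have_const_cap() test plus a vcpu feature
bit, so a check like this stays cheap once the system capabilities have been
finalized.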