If running an NV guest on an ARMv8.4-NV capable system, let's allocate
an additional page that will be used by the hypervisor to fulfill
system register accesses.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h | 3 ++-
 arch/arm64/kvm/nested.c           | 8 ++++++++
 arch/arm64/kvm/reset.c            | 1 +
 3 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -526,7 +526,8 @@ struct kvm_vcpu_arch {
  */
 static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 {
-	if (unlikely(r >= __VNCR_START__ && ctxt->vncr_array))
+	if (unlikely(cpus_have_const_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
+		     r >= __VNCR_START__ && ctxt->vncr_array))
 		return &ctxt->vncr_array[r - __VNCR_START__];
 
 	return (u64 *)&ctxt->sys_regs[r];
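The hunk above makes __ctxt_sys_reg() dispatch between two backing stores:
on an ARMv8.4-NV host, registers at or above __VNCR_START__ resolve into the
separately allocated VNCR page (which the hardware can then reach relative to
VNCR_EL2), while everything else stays in the flat sys_regs[] array. A rough
standalone sketch of that lookup, with made-up sizes and a hypothetical
VNCR_START value, compiles as plain C:

#include <stdint.h>

/* Illustrative sizes only; the kernel derives the real values from its
 * sysreg enums, with __VNCR_START__ marking the first VNCR-backed register. */
#define NR_SYS_REGS	128
#define VNCR_START	 96	/* hypothetical stand-in for __VNCR_START__ */

struct cpu_context {
	uint64_t  sys_regs[NR_SYS_REGS];
	uint64_t *vncr_array;	/* one zeroed page on ARMv8.4-NV hosts, else NULL */
};

/* Mirrors __ctxt_sys_reg(): VNCR-backed registers resolve into the
 * dedicated page, everything else stays in the flat sys_regs[] array. */
uint64_t *ctxt_sys_reg(struct cpu_context *ctxt, int r)
{
	if (r >= VNCR_START && ctxt->vncr_array)
		return &ctxt->vncr_array[r - VNCR_START];
	return &ctxt->sys_regs[r];
}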
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -47,6 +47,12 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 	if (!cpus_have_const_cap(ARM64_HAS_NESTED_VIRT))
 		return -EINVAL;
 
+	if (cpus_have_const_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
+		vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vcpu->arch.ctxt.vncr_array)
+			return -ENOMEM;
+	}
+
 	mutex_lock(&kvm->lock);
 
 	/*
@@ -64,6 +70,8 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 	    kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
 		kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
 		kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+		free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+		vcpu->arch.ctxt.vncr_array = NULL;
 	} else {
 		kvm->arch.nested_mmus_size = num_mmus;
 		ret = 0;
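Two things are worth noting in the nested.c hunks above: the zeroed page is
allocated up front with __get_free_page(GFP_KERNEL | __GFP_ZERO), and the
stage-2 MMU failure path both frees it and NULLs the pointer, since the
destroy path later frees unconditionally. A userspace analogue of that
init/unwind pattern (calloc() standing in for __get_free_page(), all names
hypothetical):

#include <stdint.h>
#include <stdlib.h>

struct vcpu_ctxt {
	uint64_t *vncr_array;
};

/* Analogue of kvm_vcpu_init_nested(): take the page first; if a later
 * step fails, release it AND clear the pointer, so a subsequent destroy,
 * which frees unconditionally, cannot double-free. */
int vcpu_init_nested(struct vcpu_ctxt *ctxt, int later_step_fails)
{
	ctxt->vncr_array = calloc(512, sizeof(uint64_t));	/* one 4K page */
	if (!ctxt->vncr_array)
		return -1;	/* -ENOMEM in the kernel */

	if (later_step_fails) {		/* stage-2 MMU init failing, say */
		free(ctxt->vncr_array);
		ctxt->vncr_array = NULL;
		return -1;
	}

	return 0;
}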
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -207,6 +207,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kfree(vcpu->arch.sve_state);
+	free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
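kvm_arm_vcpu_destroy() frees the VNCR page unconditionally, which is safe
even for vCPUs that never allocated one: the pointer starts out NULL for
non-NV vCPUs (and is NULLed again on the failed-init path above), and
free_page()/free_pages() treats a zero address as a no-op. A small userspace
stand-in demonstrating why the unconditional call is harmless
(free_page_like() is hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Userspace stand-in for free_page(): like the kernel's free_pages(),
 * it ignores a zero/NULL address, so the caller never needs to test
 * whether the page was ever allocated. */
void free_page_like(uint64_t *addr)
{
	if (addr)	/* kernel: if (addr != 0) */
		free(addr);
}

int main(void)
{
	uint64_t *vncr_array = NULL;	/* non-NV vCPU: no page was allocated */

	free_page_like(vncr_array);	/* harmless no-op */

	vncr_array = calloc(512, sizeof(uint64_t));
	assert(vncr_array);
	free_page_like(vncr_array);	/* normal teardown */

	return 0;
}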