@@ -576,7 +576,7 @@ struct kvm_arch {
struct kvm_apic_map *apic_map;
unsigned int tss_addr;
- struct page *apic_access_page;
+ bool apic_access_page_done;
gpa_t wall_clock;
@@ -4002,7 +4002,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
int r = 0;
mutex_lock(&kvm->slots_lock);
- if (kvm->arch.apic_access_page)
+ if (kvm->arch.apic_access_page_done)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
kvm_userspace_mem.flags = 0;
@@ -4018,7 +4018,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
goto out;
}
- kvm->arch.apic_access_page = page;
+ /*
+ * Do not pin the APIC access page in memory so that the memory
+ * hotplug process is able to migrate it.
+ */
+ put_page(page);
+ kvm->arch.apic_access_page_done = true;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -4534,8 +4539,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
}
if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
- vmcs_write64(APIC_ACCESS_ADDR,
- page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+ kvm_vcpu_reload_apic_access_page(vcpu);
if (vmx_vm_has_apicv(vcpu->kvm))
memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -8003,8 +8007,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
} else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- vmcs_write64(APIC_ACCESS_ADDR,
- page_to_phys(vcpu->kvm->arch.apic_access_page));
+ kvm_vcpu_reload_apic_access_page(vcpu);
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -5991,6 +5991,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
+ struct page *page = NULL;
+
/*
* Only APIC access page shared by L1 and L2 vm is handled. The APIC
* access page prepared by L1 for L2's execution is still pinned in
@@ -6003,10 +6005,16 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
* migrated, GUP will wait till the migrate entry is replaced
* with the new pte entry pointing to the new page.
*/
- vcpu->kvm->arch.apic_access_page = gfn_to_page(vcpu->kvm,
- APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ page = gfn_to_page(vcpu->kvm,
+ APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
kvm_x86_ops->set_apic_access_page_addr(vcpu->kvm,
- page_to_phys(vcpu->kvm->arch.apic_access_page));
+ page_to_phys(page));
+
+ /*
+ * Do not pin the APIC access page in memory so that the memory
+ * hotplug process is able to migrate it.
+ */
+ put_page(page);
}
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
@@ -7272,8 +7280,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
- if (kvm->arch.apic_access_page)
- put_page(kvm->arch.apic_access_page);
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}
To make the APIC access page migratable, we no longer pin it in memory. When it is migrated, we should reload its physical address into all VMCSes. But when we tried to do this, all vcpus would access kvm->arch.apic_access_page without any locking, which is not safe.

Actually, kvm->arch.apic_access_page is no longer needed. Since the APIC access page is not pinned in memory anymore, we can remove the field entirely. Whenever we need to write its physical address into a VMCS, use gfn_to_page() to get its page struct, which also pins it, and unpin it afterwards.

Suggested-by: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/vmx.c              | 15 +++++++++------
 arch/x86/kvm/x86.c              | 16 +++++++++++-----
 3 files changed, 21 insertions(+), 12 deletions(-)
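For reference, the core pattern the patch applies in kvm_vcpu_reload_apic_access_page() is a transient pin: take a reference only for as long as it takes to write the physical address, then drop it. Below is a minimal sketch distilled from the hunks above; the wrapper name reload_apic_access_addr() is illustrative, while gfn_to_page(), page_to_phys(), put_page() and kvm_x86_ops->set_apic_access_page_addr() are the calls the patch itself uses.

#include <linux/kvm_host.h>

/*
 * Transient-pin sketch (illustrative): gfn_to_page() goes through GUP,
 * so it takes a reference on the APIC access page and, if the page is
 * currently being migrated, waits until the migration entry has been
 * replaced by a pte pointing to the new page.
 */
static void reload_apic_access_addr(struct kvm_vcpu *vcpu)
{
	struct page *page;

	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);

	/* Publish the (possibly new) physical address to hardware. */
	kvm_x86_ops->set_apic_access_page_addr(vcpu->kvm, page_to_phys(page));

	/*
	 * Drop the reference right away; between reloads the page stays
	 * unpinned, so memory hotplug remains free to migrate it again.
	 */
	put_page(page);
}

The page is referenced only across the vmcs write, so outside this window migration can proceed normally and the next reload simply picks up the page's new location.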