@@ -4022,7 +4022,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
if (r)
goto out;
- page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ page = gfn_to_page_no_pin(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (is_error_page(page)) {
r = -EFAULT;
goto out;
@@ -5996,7 +5996,7 @@ static void vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
* GUP will wait till the migrate entry is replaced with the new pte
* entry pointing to the new page.
*/
- vcpu->kvm->arch.apic_access_page = gfn_to_page(vcpu->kvm,
+ vcpu->kvm->arch.apic_access_page = gfn_to_page_no_pin(vcpu->kvm,
APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
kvm_x86_ops->set_apic_access_page_addr(vcpu->kvm,
page_to_phys(vcpu->kvm->arch.apic_access_page));
@@ -7255,8 +7255,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
- if (kvm->arch.apic_access_page)
- put_page(kvm->arch.apic_access_page);
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}
@@ -526,6 +526,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
int nr_pages);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+struct page *gfn_to_page_no_pin(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
@@ -1386,9 +1386,24 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
return kvm_pfn_to_page(pfn);
}
-
EXPORT_SYMBOL_GPL(gfn_to_page);
+struct page *gfn_to_page_no_pin(struct kvm *kvm, gfn_t gfn)
+{
+ struct page *page = gfn_to_page(kvm, gfn);
+
+ /*
+  * gfn_to_page() eventually calls hva_to_pfn() to get the pfn and pins
+  * the page in memory via GUP. This function drops that pin again so
+  * that the page remains migratable.
+  */
+ if (!is_error_page(page))
+ put_page(page);
+
+ return page;
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_no_pin);
+
void kvm_release_page_clean(struct page *page)
{
WARN_ON(is_error_page(page));
gfn_to_page() eventually calls hva_to_pfn() to get the pfn and pins the
page in memory via GUP. The new gfn_to_page_no_pin() drops that pin
again. After this patch, the apic access page is able to be migrated.

Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
---
 arch/x86/kvm/vmx.c       |  2 +-
 arch/x86/kvm/x86.c       |  4 +---
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      | 17 ++++++++++++++++-
 4 files changed, 19 insertions(+), 5 deletions(-)
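
As a side note for reviewers, the toy program below is a minimal userspace
model of the pin/unpin idea, not kernel code: "toy_page", "lookup_pinned",
"lookup_unpinned" and "try_migrate" are illustrative stand-ins invented for
this sketch. The point it demonstrates is only the refcount side: a page
whose reference is still held by a lookup cannot be moved, while a lookup
that drops the reference leaves the page movable.

/*
 * Minimal userspace model of the pin/unpin idea behind
 * gfn_to_page_no_pin(). Illustrative only: the structures and names
 * here are toy stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_page {
	int refcount;		/* extra references held by users (pins) */
	void *data;		/* where the contents currently live */
};

/* gfn_to_page()-like: return the page and take a reference (pin). */
static struct toy_page *lookup_pinned(struct toy_page *p)
{
	p->refcount++;
	return p;
}

/* gfn_to_page_no_pin()-like: return the page, drop the reference again. */
static struct toy_page *lookup_unpinned(struct toy_page *p)
{
	struct toy_page *page = lookup_pinned(p);

	page->refcount--;	/* unpin immediately, as in the patch */
	return page;
}

/* Migration can only move a page nobody has pinned. */
static int try_migrate(struct toy_page *p)
{
	if (p->refcount > 0)
		return -1;	/* pinned: migration must give up */

	free(p->data);
	p->data = malloc(64);	/* "new" location for the contents */
	return 0;
}

int main(void)
{
	struct toy_page page = { .refcount = 0, .data = malloc(64) };

	lookup_pinned(&page);
	printf("pinned lookup:   migrate -> %d (refused)\n", try_migrate(&page));
	page.refcount--;	/* release the pin again */

	lookup_unpinned(&page);
	printf("unpinned lookup: migrate -> %d (moved)\n", try_migrate(&page));

	free(page.data);
	return 0;
}

The price of the unpinned lookup is what the vcpu_reload_apic_access_page()
hunk above pays: the cached kvm->arch.apic_access_page has to be re-resolved
after a migration, and kvm_arch_destroy_vm() no longer needs the put_page().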