--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1832,6 +1832,45 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 	}
 }
 
+/*
+ * Fast path: grab the page that is currently mapped at @addr and report
+ * whether the mapping is writable.
+ */
+static struct page *get_user_page_and_protection(unsigned long addr,
+						  int *writable)
+{
+	struct page *page[1];
+
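+	/* A writable pin succeeding means the host pte already allows writes. */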
+	if (__get_user_pages_fast(addr, 1, 1, page) == 1) {
+		*writable = 1;
+		return page[0];
+	}
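+	/*
+	 * Retry read-only: the page may be mapped without write access,
+	 * e.g. while it is still shared copy-on-write.
+	 */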
+	if (__get_user_pages_fast(addr, 1, 0, page) == 1) {
+		*writable = 0;
+		return page[0];
+	}
+	return NULL;
+}
+
+static pfn_t kvm_get_pfn_for_page_fault(struct kvm *kvm, gfn_t gfn,
+					int write_fault, int *host_writable)
+{
+	unsigned long addr;
+	struct page *page;
+
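+	/*
+	 * Read faults can be served from the current mapping without
+	 * breaking copy-on-write, so try the fast path first.
+	 */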
+	if (!write_fault) {
+		addr = gfn_to_hva(kvm, gfn);
+		if (kvm_is_error_hva(addr)) {
+			get_page(bad_page);
+			return page_to_pfn(bad_page);
+		}
+
+		page = get_user_page_and_protection(addr, host_writable);
+		if (page)
+			return page_to_pfn(page);
+	}
+
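+	/*
+	 * Write fault, or the fast path failed: fall back to the slow path,
+	 * which maps the page writable (hence *host_writable = 1).
+	 */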
+	*host_writable = 1;
+	return kvm_get_pfn_for_gfn(kvm, gfn);
+}
+
 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
@@ -2085,6 +2124,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	int level;
 	pfn_t pfn;
 	unsigned long mmu_seq;
+	int host_writable;
 
 	level = mapping_level(vcpu, gfn);
@@ -2099,7 +2139,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = kvm_get_pfn_for_gfn(vcpu->kvm, gfn);
+	pfn = kvm_get_pfn_for_page_fault(vcpu->kvm, gfn, write, &host_writable);
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -2109,7 +2149,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, level, gfn, pfn, true);
+	r = __direct_map(vcpu, v, write, level, gfn, pfn, host_writable);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -2307,6 +2347,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	int level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
+	int write_fault = error_code & PFERR_WRITE_MASK;
+	int host_writable;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -2321,15 +2363,16 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = kvm_get_pfn_for_gfn(vcpu->kvm, gfn);
+	pfn = kvm_get_pfn_for_page_fault(vcpu->kvm, gfn, write_fault,
+					 &host_writable);
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 level, gfn, pfn, true);
+	r = __direct_map(vcpu, gpa, write_fault,
+			 level, gfn, pfn, host_writable);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -430,6 +430,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
 	unsigned long mmu_seq;
+	int host_writable;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 	kvm_mmu_audit(vcpu, "pre page fault");
@@ -461,7 +462,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = kvm_get_pfn_for_gfn(vcpu->kvm, walker.gfn);
+	pfn = kvm_get_pfn_for_page_fault(vcpu->kvm, walker.gfn, write_fault,
+					 &host_writable);
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -472,7 +474,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &write_pt, pfn, true);
+			     level, &write_pt, pfn, host_writable);
 	(void)sptep;
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 sptep, *sptep, write_pt);
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -8,6 +8,7 @@
 #include <linux/mm.h>
 #include <linux/vmstat.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 
 #include <asm/pgtable.h>
@@ -274,6 +275,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	return nr;
 }
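+/* KVM can be built as a module and now calls __get_user_pages_fast(). */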
+EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 
 /**
  * get_user_pages_fast() - pin user pages in memory