@@ -288,6 +288,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn,
+			    struct page **pages, int nr_pages, bool *enough);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
@@ -923,15 +923,25 @@ static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 }
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn, int *entry)
 {
 	struct kvm_memory_slot *slot;
 	slot = gfn_to_memslot(kvm, gfn);
+
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (entry)
+		*entry = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
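gfn_to_hva_many() reports, through the optional *entry out-parameter, how many guest frames are left in the memslot starting at gfn (the slot spans base_gfn .. base_gfn + npages - 1). The following is a minimal standalone sketch of that arithmetic, not kernel code; struct slot and remaining_in_slot() are stand-in names invented for illustration and mirror only the two kvm_memory_slot fields the calculation uses.

#include <stdio.h>

typedef unsigned long long gfn_t;

struct slot {
	gfn_t base_gfn;		/* first guest frame mapped by the slot */
	unsigned long npages;	/* number of frames in the slot */
};

/* Frames from @gfn up to the end of the slot, counting @gfn itself. */
static unsigned long remaining_in_slot(const struct slot *s, gfn_t gfn)
{
	return s->npages - (gfn - s->base_gfn);
}

int main(void)
{
	struct slot s = { .base_gfn = 0x100, .npages = 512 };

	/* 0x2f0 is 496 frames into the slot, so 16 frames remain. */
	printf("%lu\n", remaining_in_slot(&s, 0x2f0));
	return 0;
}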
@@ -1011,6 +1021,23 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 	return hva_to_pfn(kvm, addr, false);
 }
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn,
+			    struct page **pages, int nr_pages, bool *enough)
+{
+	unsigned long addr;
+	int entry, ret;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	entry = min(entry, nr_pages);
+	*enough = (entry == nr_pages);
+	ret = __get_user_pages_fast(addr, entry, 1, pages);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
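To make the intended calling convention concrete, here is a sketch of a hypothetical in-kernel caller; it is not part of this patch, it assumes the usual kvm_host.h declarations, and prefetch_guest_pages() and PREFETCH_NUM are invented names. It pins a short run of guest pages without sleeping and then drops the references with kvm_release_page_clean():

#define PREFETCH_NUM	8

static void prefetch_guest_pages(struct kvm *kvm, gfn_t gfn)
{
	struct page *pages[PREFETCH_NUM];
	bool enough;
	int got, i;

	/* -1 means gfn has no valid slot; otherwise 'got' is however many
	 * pages __get_user_pages_fast() managed to pin atomically. */
	got = gfn_to_page_many_atomic(kvm, gfn, pages, PREFETCH_NUM, &enough);
	if (got <= 0)
		return;

	/* !enough means the slot ends before gfn + PREFETCH_NUM, so fewer
	 * than PREFETCH_NUM pages were even requested. */
	for (i = 0; i < got; i++) {
		/* ... use pages[i] here ... */
		kvm_release_page_clean(pages[i]);
	}
}

Note that the return value and *enough answer different questions: the return value is how many pages were actually pinned, while *enough says whether the full nr_pages run fits inside the memslot, so a caller can tell a short slot apart from a __get_user_pages_fast() shortfall.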