@@ -289,6 +289,7 @@ void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn, int *entry);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
@@ -913,15 +913,26 @@ static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn, int *entry)
{
struct kvm_memory_slot *slot;

slot = gfn_to_memslot(kvm, gfn);
+
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
+
+ if (entry)
+ *entry = slot->npages - (gfn - slot->base_gfn);
+
return gfn_to_hva_memslot(slot, gfn);
}
+EXPORT_SYMBOL_GPL(gfn_to_hva_many);
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn_to_hva_many(kvm, gfn, NULL);
+}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)