@@ -1781,6 +1781,53 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
 	return ret;
 }
 
+bool kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+	struct kvm_vcpu *vcpu;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+	int idx;
+	bool ret = true;
+
+	/* Need vcpu context for kvm_mmu_do_page_fault. */
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (mutex_lock_killable(&vcpu->mutex))
+		return false;
+
+	vcpu_load(vcpu);
+	idx = srcu_read_lock(&kvm->srcu);
+
+	kvm_mmu_reload(vcpu);
+
+	gfn = range->start;
+	while (gfn < range->end) {
+		if (signal_pending(current)) {
+			ret = false;
+			break;
+		}
+
+		if (need_resched())
+			cond_resched();
+
+		pfn = kvm_mmu_do_page_fault(vcpu, gfn << PAGE_SHIFT,
+					    PFERR_WRITE_MASK | PFERR_USER_MASK,
+					    false);
+		if (is_error_noslot_pfn(pfn) || kvm->vm_bugged) {
+			ret = false;
+			break;
+		}
+
+		gfn++;
+	}
+
+	srcu_read_unlock(&kvm->srcu, idx);
+	vcpu_put(vcpu);
+
+	mutex_unlock(&vcpu->mutex);
+
+	return ret;
+}
+
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool flush = false;
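
For illustration only (not part of the patch): a minimal sketch of how a caller could drive the new kvm_map_gfn_range() to pre-fault a range of guest frames. The function only reads ->start and ->end, so the remaining kvm_gfn_range fields are left zero-initialized here; the helper name is hypothetical.

static bool prefault_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	struct kvm_gfn_range range = {
		.start = start,
		.end   = end,
	};

	/* Returns false if interrupted by a signal or if any gfn failed to map. */
	return kvm_map_gfn_range(kvm, &range);
}
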
@@ -237,6 +237,8 @@ struct kvm_gfn_range {
 	pte_t pte;
 	bool may_block;
 };
+
+bool kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -456,6 +456,11 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
 
 #if defined(CONFIG_MEMFD_OPS) ||\
 	(defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
+bool __weak kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+	return false;
+}
+
 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,