@@ -233,7 +233,7 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || defined(CONFIG_MEMFD_OPS)
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
@@ -2012,4 +2012,9 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+#ifdef CONFIG_MEMFD_OPS
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+ unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMFD_OPS */
+
#endif
@@ -468,7 +468,8 @@ void kvm_destroy_vcpus(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#if defined(CONFIG_MEMFD_OPS) || \
+ (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -595,6 +596,33 @@ static __always_inline int __kvm_handle_useraddr_range(struct kvm *kvm,
/* The notifiers are averse to booleans. :-( */
return (int)ret;
}
+
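+/*
+ * Bracket an in-flight invalidation: memslot updates are deferred
+ * while mn_active_invalidate_count is non-zero.
+ */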
+static void mn_active_invalidate_count_inc(struct kvm *kvm)
+{
+ spin_lock(&kvm->mn_invalidate_lock);
+ kvm->mn_active_invalidate_count++;
+ spin_unlock(&kvm->mn_invalidate_lock);
+}
+
+static void mn_active_invalidate_count_dec(struct kvm *kvm)
+{
+ bool wake;
+
+ spin_lock(&kvm->mn_invalidate_lock);
+ wake = (--kvm->mn_active_invalidate_count == 0);
+ spin_unlock(&kvm->mn_invalidate_lock);
+
+ /*
+ * There can only be one waiter, since the wait happens under
+ * slots_lock.
+ */
+ if (wake)
+ rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
+}
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
@@ -732,9 +757,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
*
* Pairs with the decrement in range_end().
*/
- spin_lock(&kvm->mn_invalidate_lock);
- kvm->mn_active_invalidate_count++;
- spin_unlock(&kvm->mn_invalidate_lock);
+ mn_active_invalidate_count_inc(kvm);
__kvm_handle_useraddr_range(kvm, &useraddr_range);
@@ -773,21 +796,11 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
.may_block = mmu_notifier_range_blockable(range),
.inode = NULL,
};
- bool wake;
__kvm_handle_useraddr_range(kvm, &useraddr_range);
/* Pairs with the increment in range_start(). */
- spin_lock(&kvm->mn_invalidate_lock);
- wake = (--kvm->mn_active_invalidate_count == 0);
- spin_unlock(&kvm->mn_invalidate_lock);
-
- /*
- * There can only be one waiter, since the wait happens under
- * slots_lock.
- */
- if (wake)
- rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
+ mn_active_invalidate_count_dec(kvm);
BUG_ON(kvm->mmu_notifier_count < 0);
}
@@ -872,6 +885,35 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+#ifdef CONFIG_MEMFD_OPS
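+/*
+ * Invalidate guest mappings for the given range of @inode; called from
+ * the memfd invalidation callbacks when backing pages are removed.
+ */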
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+ unsigned long start, unsigned long end)
+{
+ int ret;
+ const struct kvm_useraddr_range useraddr_range = {
+ .start = start,
+ .end = end,
+ .pte = __pte(0),
+ .handler = kvm_unmap_gfn_range,
+ .on_lock = (void *)kvm_null_fn,
+ .flush_on_ret = true,
+ .may_block = false,
+ .inode = inode,
+ };
+
+ /* Prevent memslot modification */
+ mn_active_invalidate_count_inc(kvm);
+ ret = __kvm_handle_useraddr_range(kvm, &useraddr_range);
+ mn_active_invalidate_count_dec(kvm);
+
+ return ret;
+}
+#endif /* CONFIG_MEMFD_OPS */
+
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
unsigned long state,
@@ -16,6 +16,9 @@ static const struct memfd_pfn_ops *memfd_ops;
static void memfd_invalidate_page_range(struct inode *inode, void *owner,
pgoff_t start, pgoff_t end)
{
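+ /* @owner is the struct kvm associated with this memfd. */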
+ kvm_memfd_invalidate_range(owner, inode, start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
}
static void memfd_fallocate(struct inode *inode, void *owner,