@@ -5,6 +5,7 @@
* Copyright 2019 Marvell. All rights reserved.
*/
#include <linux/xarray.h>
+#include <linux/sched/mm.h>
#include "uverbs.h"
#include "core_priv.h"
@@ -365,3 +366,87 @@ int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
U32_MAX);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
+
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+{
+ struct rdma_umap_priv *priv, *next_priv;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+
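+ /*
+ * hw_destroy_rwsem (asserted above) keeps uverbs_destroy_ufile_hw()
+ * from tearing the ufile down while this walk is in progress.
+ */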
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ /* Get an arbitrary mm pointer that hasn't been cleaned yet */
+ mutex_lock(&ufile->umap_lock);
+ while (!list_empty(&ufile->umaps)) {
+ int ret;
+
+ priv = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list);
+ mm = priv->vma->vm_mm;
+ ret = mmget_not_zero(mm);
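+ /*
+ * A dying mm is already unmapping its VMAs; just drop the
+ * bookkeeping for this entry and move on to the next one.
+ */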
+ if (!ret) {
+ list_del_init(&priv->list);
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
+ mm = NULL;
+ continue;
+ }
+ break;
+ }
+ mutex_unlock(&ufile->umap_lock);
+ if (!mm)
+ return;
+
+ /*
+ * The umap_lock is nested under mmap_lock since it is used within
+ * the vma_ops callbacks, so we have to clean the list one mm
+ * at a time to get the lock ordering right. Typically there
+ * will only be one mm, so no big deal.
+ */
+ mmap_read_lock(mm);
+ mutex_lock(&ufile->umap_lock);
+ list_for_each_entry_safe(priv, next_priv, &ufile->umaps, list) {
+ struct vm_area_struct *vma = priv->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+ list_del_init(&priv->list);
+
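+ /*
+ * Drop the PTEs covering this VMA; any later user access will
+ * fault into rdma_umap_ops.fault rather than the old mapping.
+ */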
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
+ }
+ mutex_unlock(&ufile->umap_lock);
+ mmap_read_unlock(mm);
+ mmput(mm);
+ }
+}
+EXPORT_SYMBOL(uverbs_user_mmap_disassociate);
+
+/**
+ * rdma_user_mmap_disassociate() - revoke all mmaps associated with a ucontext
+ *
+ * @ucontext: associated user context.
+ *
+ * This function should be called by drivers that need to disable mmap for
+ * some ucontexts, e.g. because the mappings are about to become invalid;
+ * every VMA mmapped against @ucontext's ufile is zapped.
+ */
+void rdma_user_mmap_disassociate(struct ib_ucontext *ucontext)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+
+ /*
+ * Racing with uverbs_destroy_ufile_hw(): if the trylock fails, the
+ * ufile is already being destroyed and its umaps are cleaned up there.
+ */
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ return;
+
+ uverbs_user_mmap_disassociate(ufile);
+ up_read(&ufile->hw_destroy_rwsem);
+}
+EXPORT_SYMBOL(rdma_user_mmap_disassociate);
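
A minimal usage sketch (not part of this patch): a driver whose hardware
mappings become invalid on reset could walk its own list of active ucontexts
and revoke their mmaps, leaving later faults to the uverbs fault handler.
The example_dev/example_ucontext structures and example_drv_reset_notify()
are hypothetical driver-side code, shown only for illustration:

	/* Hypothetical per-driver tracking of active ucontexts */
	struct example_ucontext {
		struct ib_ucontext ibucontext;
		struct list_head list;	/* member of example_dev.uctx_list */
	};

	struct example_dev {
		struct mutex uctx_lock;
		struct list_head uctx_list;
	};

	static void example_drv_reset_notify(struct example_dev *dev)
	{
		struct example_ucontext *uctx;

		mutex_lock(&dev->uctx_lock);
		list_for_each_entry(uctx, &dev->uctx_list, list)
			rdma_user_mmap_disassociate(&uctx->ibucontext);
		mutex_unlock(&dev->uctx_lock);
	}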
@@ -149,7 +149,6 @@ void uverbs_disassociate_api(struct uverbs_api *uapi);
void uverbs_destroy_api(struct uverbs_api *uapi);
void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs);
-void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
extern const struct uapi_definition uverbs_def_obj_async_fd[];
extern const struct uapi_definition uverbs_def_obj_counters[];
@@ -45,7 +45,6 @@
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
-#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -817,69 +816,6 @@ static const struct vm_operations_struct rdma_umap_ops = {
.fault = rdma_umap_fault,
};
-void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
-{
- struct rdma_umap_priv *priv, *next_priv;
-
- lockdep_assert_held(&ufile->hw_destroy_rwsem);
-
- while (1) {
- struct mm_struct *mm = NULL;
-
- /* Get an arbitrary mm pointer that hasn't been cleaned yet */
- mutex_lock(&ufile->umap_lock);
- while (!list_empty(&ufile->umaps)) {
- int ret;
-
- priv = list_first_entry(&ufile->umaps,
- struct rdma_umap_priv, list);
- mm = priv->vma->vm_mm;
- ret = mmget_not_zero(mm);
- if (!ret) {
- list_del_init(&priv->list);
- if (priv->entry) {
- rdma_user_mmap_entry_put(priv->entry);
- priv->entry = NULL;
- }
- mm = NULL;
- continue;
- }
- break;
- }
- mutex_unlock(&ufile->umap_lock);
- if (!mm)
- return;
-
- /*
- * The umap_lock is nested under mmap_lock since it used within
- * the vma_ops callbacks, so we have to clean the list one mm
- * at a time to get the lock ordering right. Typically there
- * will only be one mm, so no big deal.
- */
- mmap_read_lock(mm);
- mutex_lock(&ufile->umap_lock);
- list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
- list) {
- struct vm_area_struct *vma = priv->vma;
-
- if (vma->vm_mm != mm)
- continue;
- list_del_init(&priv->list);
-
- zap_vma_ptes(vma, vma->vm_start,
- vma->vm_end - vma->vm_start);
-
- if (priv->entry) {
- rdma_user_mmap_entry_put(priv->entry);
- priv->entry = NULL;
- }
- }
- mutex_unlock(&ufile->umap_lock);
- mmap_read_unlock(mm);
- mmput(mm);
- }
-}
-
/*
* ib_uverbs_open() does not need the BKL:
*
@@ -2947,6 +2947,7 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
struct rdma_user_mmap_entry *entry,
size_t length, u32 min_pgoff,
u32 max_pgoff);
+void rdma_user_mmap_disassociate(struct ib_ucontext *ucontext);
static inline int
rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
@@ -4729,6 +4730,8 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
+
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,