@@ -280,8 +280,8 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
int j;
int ret;
- ret = get_user_pages(current, current->mm, addr,
- npages, 0, 1, pages, NULL);
+ ret = get_user_pages_unlocked(current, current->mm, addr,
+ npages, 0, 1, pages);
if (ret != npages) {
int i;
@@ -811,10 +811,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
while (dim) {
const int mxp = 8;
- down_write(&current->mm->mmap_sem);
ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
- up_write(&current->mm->mmap_sem);
-
if (ret <= 0)
goto done_unlock;
else {
Function ipath_user_sdma_queue_pkts() gets called with mmap_sem held for writing. Except for get_user_pages() deep down in ipath_user_sdma_pin_pages() we don't seem to need mmap_sem at all. Even more interestingly the function ipath_user_sdma_queue_pkts() (and also ipath_user_sdma_coalesce() called somewhat later) call copy_from_user() which can hit a page fault and we deadlock on trying to get mmap_sem when handling that fault. So just make ipath_user_sdma_pin_pages() use get_user_pages_unlocked() and leave mmap_sem locking for mm. CC: Mike Marciniszyn <infinipath@intel.com> CC: Roland Dreier <roland@kernel.org> CC: linux-rdma@vger.kernel.org Signed-off-by: Jan Kara <jack@suse.cz> --- drivers/infiniband/hw/ipath/ipath_user_sdma.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-)