[RFC,06/13] iov_iter: support atomic copy_page_from_iter_iovec()

Message ID 20201129004548.1619714-7-namit@vmware.com (mailing list archive)
State New, archived
Series fs/userfaultfd: support iouring and polling

Commit Message

Nadav Amit Nov. 29, 2020, 12:45 a.m. UTC
From: Nadav Amit <namit@vmware.com>

copy_page_from_iter_iovec() cannot be used from atomic context (i.e.,
when preemption is disabled), since it calls might_fault() and may
sleep while faulting in the source user pages.

Add an "atomic" parameter to copy_page_from_iter_iovec() and expose the
functionality through a new __copy_page_from_iter(), whose additional
parameter says whether the caller runs in atomic context;
copy_page_from_iter() becomes a wrapper that passes false. When
__copy_page_from_iter() is used from atomic context it copies whatever
it can through kmap_atomic() and fails gracefully on the rest,
returning a short count instead of faulting in pages, so it will not
sleep or deadlock. The caller is expected to recover from such a
failure gracefully.
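
For illustration, a caller that must not sleep - for instance one
copying while holding a spinlock - would be expected to use the new
helper roughly as follows. This is a hypothetical sketch of the
intended calling convention, not code from this series; the
iov_iter_revert() recovery step in particular is an assumption about
how a caller might retry:

	size_t copied;

	/* Opportunistic attempt: never faults, may return a short count. */
	copied = __copy_page_from_iter(page, offset, len, iter, true);

	if (copied != len) {
		/*
		 * Some source pages were not resident. Rewind the iterator
		 * by what was copied, leave the atomic section (e.g., drop
		 * the lock), and retry with faults allowed.
		 */
		iov_iter_revert(iter, copied);
		copied = __copy_page_from_iter(page, offset, len, iter, false);
	}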

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: io-uring@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 include/linux/uio.h |  3 +++
 lib/iov_iter.c      | 23 +++++++++++++++++------
 2 files changed, 20 insertions(+), 6 deletions(-)
Patch

diff --git a/include/linux/uio.h b/include/linux/uio.h
index 72d88566694e..7c90f7371a6f 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -121,6 +121,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
+size_t __copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i, bool atomic);
+
 
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 1635111c5bd2..e597df6a46a7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -246,7 +246,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 }
 
 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
+			 struct iov_iter *i, bool atomic)
 {
 	size_t skip, copy, left, wanted;
 	const struct iovec *iov;
@@ -259,14 +259,15 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	if (unlikely(!bytes))
 		return 0;
 
-	might_fault();
+	if (!atomic)
+		might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+	if (atomic || (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy))) {
 		kaddr = kmap_atomic(page);
 		to = kaddr + offset;
 
@@ -295,6 +296,9 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		buf += copy;
 		kunmap_atomic(kaddr);
 		copy = min(bytes, iov->iov_len - skip);
+		if (atomic)
+			goto done;
+
 	}
 	/* Too bad - revert to non-atomic kmap */
 
@@ -929,8 +933,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
-size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
+size_t __copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i, bool atomic)
 {
 	if (unlikely(!page_copy_sane(page, offset, bytes)))
 		return 0;
@@ -944,7 +948,14 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 		kunmap_atomic(kaddr);
 		return wanted;
 	} else
-		return copy_page_from_iter_iovec(page, offset, bytes, i);
+		return copy_page_from_iter_iovec(page, offset, bytes, i, atomic);
+}
+EXPORT_SYMBOL(__copy_page_from_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	return __copy_page_from_iter(page, offset, bytes, i, false);
 }
 EXPORT_SYMBOL(copy_page_from_iter);
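
Note that with "atomic" set, the helper always takes the kmap_atomic()
branch above, even on !CONFIG_HIGHMEM and without the
fault_in_pages_readable() pre-fault: kmap_atomic() implies
pagefault_disable(), so copyin() returns the number of uncopied bytes
instead of sleeping, and the new "goto done" reports the short copy
instead of falling through to the sleeping "revert to non-atomic kmap"
slow path. The non-sleeping pattern the atomic branch relies on looks
roughly like this in isolation (a sketch of the generic mechanism, not
code from this patch):

	size_t left;

	pagefault_disable();	/* kmap_atomic() already implies this */
	left = __copy_from_user_inatomic(to, buf, copy);
	pagefault_enable();

	/* "left" nonzero: source page absent; report a short copy. */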