Add copy_page_from_iter_nocache(). For user-backed iterators it wraps
copy_page_from_iter_iovec() and passes in a custom copyin function,
__copy_from_user_nocache(); bvec, kvec, and xarray iterators go through
_copy_from_iter_nocache() instead. This allows callers to copy data into
a page while minimizing CPU cache pollution.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 include/linux/uio.h |  2 ++
 lib/iov_iter.c      | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/include/linux/uio.h b/include/linux/uio.h
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -142,6 +142,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
+size_t copy_page_from_iter_nocache(struct page *page, size_t offset,
+				   size_t bytes, struct iov_iter *i);
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -895,6 +895,26 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
+size_t copy_page_from_iter_nocache(struct page *page, size_t offset,
+				   size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
+	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+		WARN_ON(1);
+		return 0;
+	}
+	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+		void *kaddr = kmap_atomic(page);
+		size_t wanted = _copy_from_iter_nocache(kaddr + offset, bytes, i);
+
+		kunmap_atomic(kaddr);
+		return wanted;
+	}
+	return copy_page_from_iter_iovec(page, offset, bytes, i,
+					 __copy_from_user_nocache);
+}
+
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
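
For illustration only, a minimal sketch of a hypothetical caller; it is
not part of the patch. The function demo_fill_page() and its error
handling are invented for this example, which assumes an iov_iter already
positioned at the payload and a single destination page:

	/* Hypothetical usage sketch, not part of this patch. */
	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/uio.h>

	/*
	 * Copy @len bytes from @from into @page while avoiding cache
	 * pollution, e.g. when the CPU will not touch the data again soon
	 * because it is handed off to a device or written back to storage.
	 * On x86, __copy_from_user_nocache() uses non-temporal stores.
	 */
	static int demo_fill_page(struct page *page, struct iov_iter *from,
				  size_t len)
	{
		size_t copied;

		if (len > PAGE_SIZE)
			return -EINVAL;

		copied = copy_page_from_iter_nocache(page, 0, len, from);
		if (copied != len)
			return -EFAULT; /* short copy, e.g. bad user address */

		return 0;
	}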