Message ID | 20230925120309.1731676-9-dhowells@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | iov_iter: Convert the iterator macros into inline funcs | expand |
David Howells <dhowells@redhat.com> wrote: > +static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i) > { > - struct iov_iter *iter = priv2; > + size_t progress; > > - if (iov_iter_is_copy_mc(iter)) > - return copy_mc_to_kernel(to + progress, iter_from, len); > - return memcpy_from_iter(iter_from, progress, len, to, priv2); > + if (unlikely(i->count < bytes)) > + bytes = i->count; > + if (unlikely(!bytes)) > + return 0; > + progress = iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc); > + i->count -= progress; i->count shouldn't be decreased here as iterate_bvec() now does that. This causes the LTP abort01 test to log a warning under KASAN (see below). I'll remove the line and repush the patches. David LTP: starting abort01 ================================================================== BUG: KASAN: stack-out-of-bounds in __copy_from_iter_mc+0x2e6/0x480 Read of size 4 at addr ffffc90004777594 by task abort01/708 CPU: 4 PID: 708 Comm: abort01 Not tainted 99.6.0-rc3-ged6251886a1d #46 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009)/Incus, BIOS unknown 2/2/2022 Call Trace: <TASK> dump_stack_lvl+0x3d/0x70 print_report+0xce/0x650 ? lock_acquire+0x1b1/0x330 kasan_report+0xda/0x110 ? __copy_from_iter_mc+0x2e6/0x480 ? __copy_from_iter_mc+0x2e6/0x480 __copy_from_iter_mc+0x2e6/0x480 copy_page_from_iter_atomic+0x517/0x1350 ? __pfx_copy_page_from_iter_atomic+0x10/0x10 ? __filemap_get_folio+0x281/0x6c0 ? folio_wait_writeback+0x53/0x1e0 ? prepare_pages.constprop.0+0x40b/0x6c0 btrfs_copy_from_user+0xc6/0x290 btrfs_buffered_write+0x8c9/0x1190 ? __pfx_btrfs_buffered_write+0x10/0x10 ? _raw_spin_unlock+0x2d/0x50 ? btrfs_file_llseek+0x100/0xf00 ? follow_page_mask+0x69f/0x1e10 btrfs_do_write_iter+0x859/0xff0 ? __pfx_btrfs_file_llseek+0x10/0x10 ? find_held_lock+0x2d/0x110 ? __pfx_btrfs_do_write_iter+0x10/0x10 ? __up_read+0x211/0x790 ? __pfx___get_user_pages+0x10/0x10 ? __pfx___up_read+0x10/0x10 ? 
__kernel_write_iter+0x3be/0x6d0 __kernel_write_iter+0x226/0x6d0 ? __pfx___kernel_write_iter+0x10/0x10 dump_user_range+0x25d/0x650 ? __pfx_dump_user_range+0x10/0x10 ? __pfx_writenote+0x10/0x10 elf_core_dump+0x231f/0x2e90 ? __pfx_elf_core_dump+0x10/0x10 ? do_coredump+0x12a9/0x38c0 ? kasan_set_track+0x25/0x30 ? __kasan_kmalloc+0xaa/0xb0 ? __kmalloc_node+0x6c/0x1b0 ? do_coredump+0x12a9/0x38c0 ? get_signal+0x1e7d/0x20f0 ? 0xffffffffff600000 ? mas_next_slot+0x328/0x1dd0 ? lock_acquire+0x162/0x330 ? do_coredump+0x2537/0x38c0 do_coredump+0x2537/0x38c0 ? __pfx_do_coredump+0x10/0x10 ? kmem_cache_free+0x114/0x520 ? find_held_lock+0x2d/0x110 get_signal+0x1e7d/0x20f0 ? __pfx_get_signal+0x10/0x10 ? do_send_specific+0xf1/0x1c0 ? __pfx_do_send_specific+0x10/0x10 arch_do_signal_or_restart+0x8b/0x4b0 ? __pfx_arch_do_signal_or_restart+0x10/0x10 exit_to_user_mode_prepare+0xde/0x210 syscall_exit_to_user_mode+0x16/0x50 do_syscall_64+0x53/0x90 entry_SYSCALL_64_after_hwframe+0x6e/0xd8
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 65374ee91ecd..943aa3cfd7b3 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -253,14 +253,33 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ -static size_t memcpy_from_iter_mc(void *iter_from, size_t progress, - size_t len, void *to, void *priv2) +static __always_inline +size_t memcpy_from_iter_mc(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return copy_mc_to_kernel(to + progress, iter_from, len); +} + +static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i) { - struct iov_iter *iter = priv2; + size_t progress; - if (iov_iter_is_copy_mc(iter)) - return copy_mc_to_kernel(to + progress, iter_from, len); - return memcpy_from_iter(iter_from, progress, len, to, priv2); + if (unlikely(i->count < bytes)) + bytes = i->count; + if (unlikely(!bytes)) + return 0; + progress = iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc); + i->count -= progress; + return progress; +} + +static __always_inline +size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(iov_iter_is_copy_mc(i))) + return __copy_from_iter_mc(addr, bytes, i); + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter, memcpy_from_iter); } size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) @@ -270,9 +289,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) if (user_backed_iter(i)) might_fault(); - return iterate_and_advance2(i, bytes, addr, i, - copy_from_user_iter, - memcpy_from_iter_mc); + return __copy_from_iter(addr, bytes, i); } EXPORT_SYMBOL(_copy_from_iter); @@ -493,9 +510,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset, } p = kmap_atomic(page) + offset; - n = iterate_and_advance2(i, n, p, i, - copy_from_user_iter, - memcpy_from_iter_mc); + __copy_from_iter(p, n, i); kunmap_atomic(p); copied 
+= n; offset += n;
iter->copy_mc is only used with a bvec iterator and only by dump_emit_page() in fs/coredump.c, so rather than handle this in memcpy_from_iter_mc() where it is checked repeatedly by _copy_from_iter() and copy_page_from_iter_atomic(), hoist the check into a common wrapper, __copy_from_iter(), that performs the iov_iter_is_copy_mc() test once and diverts to a bvec-only handler, __copy_from_iter_mc(), when the flag is set. Signed-off-by: David Howells <dhowells@redhat.com> cc: Alexander Viro <viro@zeniv.linux.org.uk> cc: Jens Axboe <axboe@kernel.dk> cc: Christoph Hellwig <hch@lst.de> cc: Christian Brauner <christian@brauner.io> cc: Matthew Wilcox <willy@infradead.org> cc: Linus Torvalds <torvalds@linux-foundation.org> cc: David Laight <David.Laight@ACULAB.COM> cc: linux-block@vger.kernel.org cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org --- lib/iov_iter.c | 39 +++++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 12 deletions(-)