@@ -129,6 +129,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);
+int copy_struct_from_iter(void *dst, size_t ksize, struct iov_iter *i);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
@@ -995,6 +995,97 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
+/**
+ * copy_struct_from_iter - copy a struct from an iov_iter
+ * @dst: Destination buffer.
+ * @ksize: Size of @dst struct.
+ * @i: Source iterator.
+ *
+ * Copies a struct from an iov_iter in a way that guarantees
+ * backwards-compatibility for struct arguments in an iovec (as long as the
+ * rules for copy_struct_from_user() are followed).
+ *
+ * The source struct is assumed to be stored in the current segment of the
+ * iov_iter, and its size is the size of the current segment. The iov_iter must
+ * be positioned at the beginning of the current segment.
+ *
+ * The recommended usage is something like the following:
+ *
+ *   int do_foo(struct iov_iter *i)
+ *   {
+ *      size_t usize = iov_iter_single_seg_count(i);
+ *      struct foo karg;
+ *      int err;
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *      err = copy_struct_from_iter(&karg, sizeof(karg), i);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * Returns 0 on success or one of the following errors:
+ * * -E2BIG:  (size of current segment > @ksize) and there are non-zero
+ *            trailing bytes in the current segment.
+ * * -EFAULT: access to userspace failed, or the iterator type is
+ *            unsupported.
+ * * -EINVAL: the iterator is not at the beginning of the current segment.
+ *
+ * On success, the iterator is advanced to the next segment. On error, the
+ * iterator is not advanced.
+ */
+int copy_struct_from_iter(void *dst, size_t ksize, struct iov_iter *i)
+{
+	size_t usize;
+	int ret;
+
+	if (i->iov_offset != 0)
+		return -EINVAL;
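+	/* How the segment is copied depends on what backs the iterator. */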
+	if (iter_is_iovec(i)) {
+		usize = i->iov->iov_len;
+		might_fault();
+		if (copyin(dst, i->iov->iov_base, min(ksize, usize)))
+			return -EFAULT;
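+		/*
+		 * A userspace struct larger than the kernel's is only
+		 * accepted if every trailing byte beyond @ksize is zero.
+		 */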
+		if (usize > ksize) {
+			ret = check_zeroed_user(i->iov->iov_base + ksize,
+						usize - ksize);
+			if (ret < 0)
+				return ret;
+			else if (ret == 0)
+				return -E2BIG;
+		}
+	} else if (iov_iter_is_kvec(i)) {
+		usize = i->kvec->iov_len;
+		memcpy(dst, i->kvec->iov_base, min(ksize, usize));
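+		/* Kernel-space source: check the trailing bytes directly. */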
+		if (usize > ksize &&
+		    memchr_inv(i->kvec->iov_base + ksize, 0, usize - ksize))
+			return -E2BIG;
+	} else if (iov_iter_is_bvec(i)) {
+		char *p;
+
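+		/* Map the segment's page for the copy and the tail check. */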
+		usize = i->bvec->bv_len;
+		p = kmap_local_page(i->bvec->bv_page);
+		memcpy(dst, p + i->bvec->bv_offset, min(ksize, usize));
+		if (usize > ksize &&
+		    memchr_inv(p + i->bvec->bv_offset + ksize, 0,
+			       usize - ksize)) {
+			kunmap_local(p);
+			return -E2BIG;
+		}
+		kunmap_local(p);
+	} else {
+		return -EFAULT;
+	}
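+	/*
+	 * If the source struct is smaller, zero-fill the rest of @dst, as
+	 * copy_struct_from_user() does.
+	 */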
+	if (usize < ksize)
+		memset(dst + usize, 0, ksize - usize);
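+	/* Consume the whole segment so the iterator moves to the next one. */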
+	iov_iter_advance(i, usize);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(copy_struct_from_iter);
+
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;