--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -79,8 +79,8 @@
#define iterate_xarray(i, n, __v, skip, STEP) { \
struct page *head = NULL; \
size_t wanted = n, seg, offset; \
- loff_t start = i->xarray_start + skip; \
- pgoff_t index = start >> PAGE_SHIFT; \
+ loff_t xarray_start = i->xarray_start + skip; \
+ pgoff_t index = xarray_start >> PAGE_SHIFT; \
int j; \
\
XA_STATE(xas, i->xarray, index); \
@@ -113,7 +113,7 @@
n = wanted - n; \
}

-#define iterate_all_kinds(i, n, v, I, B, K, X) { \
+#define iterate_all_kinds(i, n, v, I, B, K) { \
if (likely(n)) { \
size_t skip = i->iov_offset; \
if (unlikely(i->type & ITER_BVEC)) { \
@@ -127,7 +127,7 @@
} else if (unlikely(i->type & ITER_DISCARD)) { \
} else if (unlikely(i->type & ITER_XARRAY)) { \
struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)); \
+ iterate_xarray(i, n, v, skip, (B)); \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -842,9 +842,7 @@ bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)

iov_iter_advance(i, bytes);
@@ -927,9 +925,7 @@ bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)

iov_iter_advance(i, bytes);
@@ -1058,9 +1054,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
)
kunmap_atomic(kaddr);
return bytes;
@@ -1349,8 +1343,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
iterate_all_kinds(i, size, v,
(res |= (unsigned long)v.iov_base | v.iov_len, 0),
res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len,
- res |= v.bv_offset | v.bv_len
+ res |= (unsigned long)v.iov_base | v.iov_len
)
return res;
}
@@ -1372,9 +1365,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
(size != v.bv_len ? size : 0)),
(res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0))
+ (size != v.iov_len ? size : 0))
);
return res;
}
@@ -1530,8 +1521,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
return v.bv_len;
}),({
return -EFAULT;
- }),
- 0
+ })
)
return 0;
}
@@ -1665,7 +1655,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
return v.bv_len;
}),({
return -EFAULT;
- }), 0
+ })
)
return 0;
}
@@ -1751,13 +1741,6 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
v.iov_base, v.iov_len,
sum, off);
off += v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
})
)
*csum = sum;
@@ -1892,8 +1875,7 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
- p / PAGE_SIZE;
if (npages >= maxpages)
return maxpages;
- }),
- 0
+ })
)
return npages;
}
Drop the X argument from iterate_all_kinds() and use the B argument in its
place, as the two are always the same unless the ITER_XARRAY case is being
handled specially.

Signed-off-by: David Howells <dhowells@redhat.com>
---
 lib/iov_iter.c | 42 ++++++++++++------------------------------
 1 file changed, 12 insertions(+), 30 deletions(-)
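
For readers who want to see the shape of the change outside the kernel tree,
here is a minimal userspace sketch of the dispatch pattern after this patch:
each iterator flavour gets one step expression as a macro argument, and the
page-backed "xarray" flavour simply reuses the bvec step (B) rather than
taking its own X argument. Everything below (toy_iter, TOY_BVEC,
toy_iterate, and so on) is hypothetical illustration code, not the kernel's
iov_iter API.

/*
 * Hypothetical userspace sketch -- not kernel code.  It mirrors the
 * macro shape after this patch: one step expression per flavour, with
 * the page-backed "xarray" flavour falling through to the bvec step.
 */
#include <stdio.h>
#include <string.h>

enum toy_type { TOY_IOVEC, TOY_BVEC, TOY_XARRAY };

struct toy_seg {
	void *base;
	size_t len;
};

struct toy_iter {
	enum toy_type type;
	struct toy_seg seg;	/* one segment only, for brevity */
};

/*
 * Dispatch on the iterator flavour.  There is no separate X argument:
 * TOY_XARRAY runs the same step (B) as TOY_BVEC.
 */
#define toy_iterate(i, v, I, B) do {				\
	struct toy_seg v = (i)->seg;				\
	if ((i)->type == TOY_BVEC || (i)->type == TOY_XARRAY)	\
		(B);						\
	else							\
		(I);						\
} while (0)

int main(void)
{
	char src[] = "hello", dst[sizeof(src)];
	struct toy_iter it = {
		.type = TOY_XARRAY,
		.seg  = { .base = src, .len = sizeof(src) },
	};

	toy_iterate(&it, v,
		memcpy(dst, v.base, v.len),	/* I: user iovec step */
		memcpy(dst, v.base, v.len));	/* B: page-backed step, also
						 * covers the xarray case */
	printf("%s\n", dst);
	return 0;
}

Compiled with any C99 compiler this prints "hello" whether the iterator is
flagged TOY_BVEC or TOY_XARRAY; the payoff of the consolidation is visible in
the macro signature, since callers supply one fewer step expression and the
xarray branch can never drift out of sync with the bvec step it shares.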