@@ -330,6 +330,22 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
return NETFS_DOWNLOAD_FROM_SERVER;
}
+/*
+ * Prepare for a write to occur.
+ */
+static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size)
+{
+ loff_t start = *_start;
+ size_t len = *_len, down;
+
+ /* Round to DIO size */
+ down = start - round_down(start, PAGE_SIZE);
+ *_start = start - down;
+ *_len = round_up(down + len, PAGE_SIZE);
+ return 0;
+}
+
/*
* Clean up an operation.
*/
@@ -355,6 +371,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.read = cachefiles_read,
.write = cachefiles_write,
.prepare_read = cachefiles_prepare_read,
+ .prepare_write = cachefiles_prepare_write,
};
/*
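[Illustrative note: the following is a minimal userspace sketch, not kernel code.  It mirrors the rounding that cachefiles_prepare_write() above performs when expanding a write to DIO granularity.  The 4096-byte page size, the ex_* macro names and the sample offsets are assumptions made purely for the example.]

/*
 * Sketch of the DIO rounding done by cachefiles_prepare_write():
 * pull the start back to a page boundary, then round the length up.
 */
#include <stdio.h>

#define EX_PAGE_SIZE		4096ULL
#define ex_round_down(x, y)	((x) & ~((y) - 1))
#define ex_round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	unsigned long long start = 5000, len = 3000;	/* bytes 5000..7999 */
	unsigned long long down = start - ex_round_down(start, EX_PAGE_SIZE);

	start -= down;					/* back to 4096 */
	len = ex_round_up(down + len, EX_PAGE_SIZE);	/* 904 + 3000 -> 4096 */

	printf("DIO-aligned write becomes [%llu, %llu)\n", start, start + len);
	return 0;
}

With these sample numbers the write [5000, 8000) becomes [4096, 8192), i.e. the rounding happens once, in the cache backend, rather than being baked into the netfs amalgamation loop.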
@@ -293,7 +293,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
struct netfs_cache_resources *cres = &rreq->cache_resources;
struct netfs_read_subrequest *subreq, *next, *p;
struct iov_iter iter;
- loff_t pos;
+ int ret;
trace_netfs_rreq(rreq, netfs_rreq_trace_write);
@@ -311,23 +311,22 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
/* Amalgamate adjacent writes */
- pos = round_down(subreq->start, PAGE_SIZE);
- if (pos != subreq->start) {
- subreq->len += subreq->start - pos;
- subreq->start = pos;
- }
- subreq->len = round_up(subreq->len, PAGE_SIZE);
-
while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
next = list_next_entry(subreq, rreq_link);
- if (next->start > subreq->start + subreq->len)
+ if (next->start != subreq->start + subreq->len)
break;
subreq->len += next->len;
- subreq->len = round_up(subreq->len, PAGE_SIZE);
list_del_init(&next->rreq_link);
netfs_put_subrequest(next, false);
}
+ ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+ rreq->i_size);
+ if (ret < 0) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+ continue;
+ }
+
iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
subreq->start, subreq->len);
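[Illustrative note: a standalone sketch, not kernel code, of the new amalgamation rule in netfs_rreq_do_write_to_cache(): merge only strictly contiguous extents and leave anything with a gap alone, then let ->prepare_write() do any rounding.  The struct and function names below are invented for the example.]

#include <stdio.h>
#include <stddef.h>

struct ex_extent {
	unsigned long long start;
	unsigned long long len;
};

static size_t ex_merge_contiguous(struct ex_extent *e, size_t n)
{
	size_t out = 0, i = 0;

	while (i < n) {
		struct ex_extent cur = e[i++];

		/* Amalgamate only while the next extent begins exactly where
		 * the current one ends; a gap terminates the run. */
		while (i < n && e[i].start == cur.start + cur.len)
			cur.len += e[i++].len;
		e[out++] = cur;
	}
	return out;
}

int main(void)
{
	struct ex_extent e[] = {
		{ 0,    1000 },		/* contiguous with the next entry */
		{ 1000, 2000 },		/* merges to [0, 3000) */
		{ 8192, 4096 },		/* gap before it, so kept separate */
	};
	size_t n = ex_merge_contiguous(e, 3);

	for (size_t i = 0; i < n; i++)
		printf("extent [%llu, %llu)\n", e[i].start, e[i].start + e[i].len);
	return 0;
}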
@@ -206,6 +206,12 @@ struct netfs_cache_ops {
*/
enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
loff_t i_size);
+
+ /* Prepare a write operation, working out what part of the write we can
+ * actually do.
+ */
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
};
struct readahead_control;
@@ -43,6 +43,7 @@ enum netfs_sreq_trace {
netfs_sreq_trace_submit,
netfs_sreq_trace_terminated,
netfs_sreq_trace_write,
+ netfs_sreq_trace_write_skip,
netfs_sreq_trace_write_term,
};
@@ -77,6 +78,7 @@ enum netfs_sreq_trace {
EM(netfs_sreq_trace_submit, "SUBMT") \
EM(netfs_sreq_trace_terminated, "TERM ") \
EM(netfs_sreq_trace_write, "WRITE") \
+ EM(netfs_sreq_trace_write_skip, "SKIP ") \
E_(netfs_sreq_trace_write_term, "WTERM")
Fix the amalgamation of subrequests when copying to the cache.  We shouldn't
be rounding up the size to PAGE_SIZE as we go along, as that ends up with the
composite subrequest length being too long - and this leads to EIO from the
cache write because the source iterator doesn't contain enough data.

Instead, we only need to deal with contiguous subreqs and then ask the cache
to round off as it needs - which also means we don't have to make any
assumptions about the cache granularity.

Signed-off-by: David Howells <dhowells@redhat.com>
---
 fs/cachefiles/io.c           | 17 +++++++++++++++++
 fs/netfs/read_helper.c       | 19 +++++++++----------
 include/linux/netfs.h        |  6 ++++++
 include/trace/events/netfs.h |  2 ++
 4 files changed, 34 insertions(+), 10 deletions(-)
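[Illustrative note: a worked example of the overcount described above, not kernel code.  The offsets, lengths and 4096-byte page size are assumptions chosen for the arithmetic: three contiguous 1000-byte subrequests grow to a 12288-byte composite write under the old round-up-as-you-go scheme, even though only 3000 bytes of source data exist, whereas the fix merges them to 3000 bytes and rounds once.]

#include <stdio.h>

#define EX_PAGE_SIZE		4096UL
#define ex_round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	unsigned long lens[] = { 1000, 1000, 1000 };
	unsigned long old_len, new_len = 0;

	/* Old behaviour: round up to PAGE_SIZE after every amalgamation step. */
	old_len = ex_round_up(lens[0], EX_PAGE_SIZE);			/* 4096 */
	old_len = ex_round_up(old_len + lens[1], EX_PAGE_SIZE);	/* 8192 */
	old_len = ex_round_up(old_len + lens[2], EX_PAGE_SIZE);	/* 12288 */

	/* New behaviour: sum the contiguous lengths and round once at the
	 * end, as ->prepare_write() would. */
	for (int i = 0; i < 3; i++)
		new_len += lens[i];
	new_len = ex_round_up(new_len, EX_PAGE_SIZE);			/* 4096 */

	printf("old composite length %lu, new composite length %lu\n",
	       old_len, new_len);
	return 0;
}

The old composite length exceeds the data available in the source iterator, which is what produced the EIO from the cache write; the new length matches what the cache actually needs.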