
netfs: In readahead, put the folio refs as soon extracted

Message ID: 3771538.1728052438@warthog.procyon.org.uk
State: New
Series: netfs: In readahead, put the folio refs as soon extracted

Commit Message

David Howells Oct. 4, 2024, 2:33 p.m. UTC
netfslib currently defers dropping the refs on the folios it obtains
during readahead until after it has started I/O, on the basis that this
can be done whilst we wait for the I/O to complete; however, this runs
the risk of racing with the I/O collection in the future.

Furthermore, Matthew Wilcox strongly suggests that the refs should be
dropped immediately, as readahead_folio() does (netfslib is using
__readahead_batch() which doesn't drop the refs).

Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
 fs/netfs/buffered_read.c     |   47 ++++++++++++-------------------------------
 fs/netfs/read_collect.c      |    2 +
 include/trace/events/netfs.h |    1 -
 3 files changed, 16 insertions(+), 34 deletions(-)
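
For context, a minimal sketch of the two readahead consumption styles the
commit message contrasts, not taken from the patch and assuming a
hypothetical start_io_on_folio() helper standing in for whatever the
filesystem does with each locked folio: readahead_folio() drops the
readahead ref itself, whereas __readahead_batch() leaves the refs with the
caller.

#include <linux/mm.h>
#include <linux/pagemap.h>

static void start_io_on_folio(struct folio *folio);	/* hypothetical helper */

/* readahead_folio() puts the readahead ref itself; the folio stays pinned
 * only by its lock until the I/O completion path unlocks it.
 */
static void consume_one_by_one(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL)
		start_io_on_folio(folio);
}

/* __readahead_batch() hands the refs over to the caller, which must put
 * them itself (immediately, if it relies on the folio locks for pinning,
 * as netfslib does).
 */
static void consume_in_batches(struct readahead_control *ractl)
{
	struct folio *folios[16];
	unsigned int i, nr;

	while ((nr = __readahead_batch(ractl, (struct page **)folios,
				       ARRAY_SIZE(folios))) > 0) {
		for (i = 0; i < nr; i++) {
			start_io_on_folio(folios[i]);
			folio_put(folios[i]);	/* ref inherited from readahead */
		}
	}
}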

Comments

Christian Brauner Oct. 7, 2024, 11:48 a.m. UTC | #1
On Fri, 04 Oct 2024 15:33:58 +0100, David Howells wrote:
> netfslib currently defers dropping the refs on the folios it obtains
> during readahead until after it has started I/O, on the basis that this
> can be done whilst we wait for the I/O to complete; however, this runs
> the risk of racing with the I/O collection in the future.
> 
> Furthermore, Matthew Wilcox strongly suggests that the refs should be
> dropped immediately, as readahead_folio() does (netfslib is using
> __readahead_batch() which doesn't drop the refs).
> 
> [...]

Applied to the vfs.fixes branch of the vfs/vfs.git tree.
Patches in the vfs.fixes branch should appear in linux-next soon.

Please report any outstanding bugs that were missed during review in a
new review of the original patch series, allowing us to drop it.

It's encouraged to provide Acked-bys and Reviewed-bys even though the
patch has now been applied. If possible, the patch trailers will be updated.

Note that commit hashes shown below are subject to change due to rebase,
trailer updates or similar. If in doubt, please check the listed branch.

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs.git
branch: vfs.fixes

[1/1] netfs: In readahead, put the folio refs as soon extracted
      https://git.kernel.org/vfs/vfs/c/796a4049640b

Patch

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index c40e226053cc..af46a598f4d7 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -67,7 +67,8 @@  static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
  * Decant the list of folios to read into a rolling buffer.
  */
 static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
-					struct folio_queue *folioq)
+					struct folio_queue *folioq,
+					struct folio_batch *put_batch)
 {
 	unsigned int order, nr;
 	size_t size = 0;
@@ -82,6 +83,9 @@  static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
 		order = folio_order(folio);
 		folioq->orders[i] = order;
 		size += PAGE_SIZE << order;
+
+		if (!folio_batch_add(put_batch, folio))
+			folio_batch_release(put_batch);
 	}
 
 	for (int i = nr; i < folioq_nr_slots(folioq); i++)
@@ -120,6 +124,9 @@  static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 		 * that we will need to release later - but we don't want to do
 		 * that until after we've started the I/O.
 		 */
+		struct folio_batch put_batch;
+
+		folio_batch_init(&put_batch);
 		while (rreq->submitted < subreq->start + rsize) {
 			struct folio_queue *tail = rreq->buffer_tail, *new;
 			size_t added;
@@ -132,10 +139,11 @@  static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 			new->prev = tail;
 			tail->next = new;
 			rreq->buffer_tail = new;
-			added = netfs_load_buffer_from_ra(rreq, new);
+			added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
 			rreq->iter.count += added;
 			rreq->submitted += added;
 		}
+		folio_batch_release(&put_batch);
 	}
 
 	subreq->len = rsize;
@@ -348,6 +356,7 @@  static int netfs_wait_for_read(struct netfs_io_request *rreq)
 static int netfs_prime_buffer(struct netfs_io_request *rreq)
 {
 	struct folio_queue *folioq;
+	struct folio_batch put_batch;
 	size_t added;
 
 	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
@@ -360,39 +369,14 @@  static int netfs_prime_buffer(struct netfs_io_request *rreq)
 	rreq->submitted = rreq->start;
 	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
 
-	added = netfs_load_buffer_from_ra(rreq, folioq);
+	folio_batch_init(&put_batch);
+	added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
+	folio_batch_release(&put_batch);
 	rreq->iter.count += added;
 	rreq->submitted += added;
 	return 0;
 }
 
-/*
- * Drop the ref on each folio that we inherited from the VM readahead code.  We
- * still have the folio locks to pin the page until we complete the I/O.
- *
- * Note that we can't just release the batch in each queue struct as we use the
- * occupancy count in other places.
- */
-static void netfs_put_ra_refs(struct folio_queue *folioq)
-{
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-	while (folioq) {
-		for (unsigned int slot = 0; slot < folioq_count(folioq); slot++) {
-			struct folio *folio = folioq_folio(folioq, slot);
-			if (!folio)
-				continue;
-			trace_netfs_folio(folio, netfs_folio_trace_read_put);
-			if (!folio_batch_add(&fbatch, folio))
-				folio_batch_release(&fbatch);
-		}
-		folioq = folioq->next;
-	}
-
-	folio_batch_release(&fbatch);
-}
-
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -436,9 +420,6 @@  void netfs_readahead(struct readahead_control *ractl)
 		goto cleanup_free;
 	netfs_read_to_pagecache(rreq);
 
-	/* Release the folio refs whilst we're waiting for the I/O. */
-	netfs_put_ra_refs(rreq->buffer);
-
 	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
 	return;
 
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index b18c65ba5580..3cbb289535a8 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -77,6 +77,8 @@  static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
 			folio_unlock(folio);
 		}
 	}
+
+	folioq_clear(folioq, slot);
 }
 
 /*
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 1d7c52821e55..69975c9c6823 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -172,7 +172,6 @@ 
 	EM(netfs_folio_trace_read,		"read")		\
 	EM(netfs_folio_trace_read_done,		"read-done")	\
 	EM(netfs_folio_trace_read_gaps,		"read-gaps")	\
-	EM(netfs_folio_trace_read_put,		"read-put")	\
 	EM(netfs_folio_trace_read_unlock,	"read-unlock")	\
 	EM(netfs_folio_trace_redirtied,		"redirtied")	\
 	EM(netfs_folio_trace_store,		"store")	\
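
As a footnote to the patch above, a minimal sketch of the folio_batch idiom
it adopts, assuming a hypothetical decant_into_buffer() step standing in
for the transfer into the rolling buffer: the folio locks keep the folios
pinned until I/O completion unlocks them, so the refs inherited from the VM
readahead code can be put in bulk as each folio is decanted, rather than
being held until after the I/O has been started.

#include <linux/pagevec.h>

static void decant_into_buffer(struct folio *folio);	/* hypothetical helper */

static void put_refs_as_we_go(struct folio **folios, unsigned int nr)
{
	struct folio_batch put_batch;
	unsigned int i;

	folio_batch_init(&put_batch);
	for (i = 0; i < nr; i++) {
		decant_into_buffer(folios[i]);
		/* folio_batch_add() returns 0 when the batch is full;
		 * folio_batch_release() then puts every queued ref and
		 * reinitialises the batch for reuse.
		 */
		if (!folio_batch_add(&put_batch, folios[i]))
			folio_batch_release(&put_batch);
	}
	folio_batch_release(&put_batch);	/* drop any refs still queued */
}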