
[v14,112/138] mm/filemap: Convert filemap_get_read_batch to use folios

Message ID 20210715033704.692967-113-willy@infradead.org
State New
Series Memory folios

Commit Message

Matthew Wilcox (Oracle) July 15, 2021, 3:36 a.m. UTC
The page cache only stores folios, never tail pages.  Saves 29 bytes
due to removing calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/filemap.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

Comments

Matthew Wilcox (Oracle) Aug. 21, 2021, 6:48 p.m. UTC | #1
On Thu, Jul 15, 2021 at 04:36:38AM +0100, Matthew Wilcox (Oracle) wrote:
>  	rcu_read_lock();
> -	for (head = xas_load(&xas); head; head = xas_next(&xas)) {
> -		if (xas_retry(&xas, head))
> +	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
> +		if (xas_retry(&xas, folio))
>  			continue;
> -		if (xas.xa_index > max || xa_is_value(head))
> +		if (xas.xa_index > max || xa_is_value(folio))
>  			break;
> -		if (!page_cache_get_speculative(head))
> +		if (!folio_try_get_rcu(folio))
>  			goto retry;
>  
> -		/* Has the page moved or been split? */
> -		if (unlikely(head != xas_reload(&xas)))
> +		if (unlikely(folio != xas_reload(&xas)))
>  			goto put_page;
>  
> -		if (!pagevec_add(pvec, head))
> +		if (!pagevec_add(pvec, &folio->page))
>  			break;
> -		if (!PageUptodate(head))
> +		if (!folio_test_uptodate(folio))
>  			break;
> -		if (PageReadahead(head))
> +		if (folio_test_readahead(folio))
>  			break;
> -		xas.xa_index = head->index + thp_nr_pages(head) - 1;
> +		xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
>  		xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
>  		continue;

It's not a bug in _this_ patch, but these last two lines become a bug
once the page cache is converted to store folios as multi-index entries
(as opposed to now, when it replicates an order-N entry 2^N times).
I should not have used xas.xa_shift (which is the shift of the entry
we're looking for, and is always 0 here), but xas.xa_node->shift (the
shift of the node that holds the entry we actually found).

If you have an order-7 page, occupying (say) indices 128-255, we set
xa_index to 255, but instead of setting xa_offset to 3, we set it to 63.
That tricks __xas_next() into going up to the parent node, and then back
down, which might mean that we terminate the scan early, or that we skip
over all the other entries in the node.  What I actually noticed was a
crash where we ended up loading an internal entry out of the XArray.
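To make the arithmetic concrete, here is a minimal stand-alone sketch of
the offset calculation (not part of the patch; it assumes the default
XA_CHUNK_SHIFT of 6, i.e. 64 slots per node, and redefines the constants
so it compiles in user space):

#include <stdio.h>

/* Stand-alone copies of the XArray constants for the default config. */
#define XA_CHUNK_SHIFT	6
#define XA_CHUNK_MASK	((1UL << XA_CHUNK_SHIFT) - 1)

int main(void)
{
	unsigned long index = 255;	/* last index of the order-7 folio at 128-255 */
	unsigned char node_shift = 6;	/* xas.xa_node->shift: the node holding the entry */
	unsigned char entry_shift = 0;	/* xas.xa_shift: shift of the entry we asked for */

	/* Slot within the node that actually covers index 255 */
	printf("correct offset: %lu\n", (index >> node_shift) & XA_CHUNK_MASK);	/* 3 */

	/* What the current code computes: the node's last slot */
	printf("buggy offset:   %lu\n", (index >> entry_shift) & XA_CHUNK_MASK);	/* 63 */

	return 0;
}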

It's all a bit complicated really.  That calls for a helper, and this is
my current candidate:

+static inline void xas_advance(struct xa_state *xas, unsigned long index)
+{
+       unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
+
+       xas->xa_index = index;
+       xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
+}
...
-               xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
-               xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
+               xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);

This fix is coming up on 4 hours of continuous testing with generic/559.
Without it, the test would usually crash within about 40 minutes.

Patch

diff --git a/mm/filemap.c b/mm/filemap.c
index c4190c0a6d86..04501bf50448 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2272,32 +2272,31 @@  static void filemap_get_read_batch(struct address_space *mapping,
 		pgoff_t index, pgoff_t max, struct pagevec *pvec)
 {
 	XA_STATE(xas, &mapping->i_pages, index);
-	struct page *head;
+	struct folio *folio;
 
 	rcu_read_lock();
-	for (head = xas_load(&xas); head; head = xas_next(&xas)) {
-		if (xas_retry(&xas, head))
+	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
+		if (xas_retry(&xas, folio))
 			continue;
-		if (xas.xa_index > max || xa_is_value(head))
+		if (xas.xa_index > max || xa_is_value(folio))
 			break;
-		if (!page_cache_get_speculative(head))
+		if (!folio_try_get_rcu(folio))
 			goto retry;
 
-		/* Has the page moved or been split? */
-		if (unlikely(head != xas_reload(&xas)))
+		if (unlikely(folio != xas_reload(&xas)))
 			goto put_page;
 
-		if (!pagevec_add(pvec, head))
+		if (!pagevec_add(pvec, &folio->page))
 			break;
-		if (!PageUptodate(head))
+		if (!folio_test_uptodate(folio))
 			break;
-		if (PageReadahead(head))
+		if (folio_test_readahead(folio))
 			break;
-		xas.xa_index = head->index + thp_nr_pages(head) - 1;
+		xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
 		xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
 		continue;
 put_page:
-		put_page(head);
+		folio_put(folio);
 retry:
 		xas_reset(&xas);
 	}