
[RFC,1/4] iomap: allow passing a folio into write begin path

Message ID 20241119154656.774395-2-bfoster@redhat.com
State New
Series iomap: zero range folio batch processing prototype

Commit Message

Brian Foster Nov. 19, 2024, 3:46 p.m. UTC
To facilitate batch processing of dirty folios for zero range, tweak
the write begin path to allow the caller to optionally pass in its
own folio/pos combination.

Signed-off-by: Brian Foster <bfoster@redhat.com>
---
 fs/iomap/buffered-io.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
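
In short, iomap_write_begin() now treats *foliop as an optional input:
callers that pass NULL get the existing folio lookup, while a caller that
already holds a locked folio can hand it in directly. A minimal sketch of
the two conventions follows; next_dirty_folio() is a made-up helper
standing in for the batch lookup, not part of this series.

	struct folio *folio;
	int status;

	/* Existing callers: pass NULL and let iomap_write_begin() do the lookup. */
	folio = NULL;
	status = iomap_write_begin(iter, pos, len, &folio);

	/*
	 * A batch-processing caller instead passes in a locked folio it
	 * already holds, e.g. one pulled from a folio_batch of dirty folios,
	 * and the lookup is skipped. next_dirty_folio() is hypothetical.
	 */
	folio = next_dirty_folio(&fbatch, &pos);
	status = iomap_write_begin(iter, pos, len, &folio);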

Comments

Christoph Hellwig Nov. 20, 2024, 8:38 a.m. UTC | #1
On Tue, Nov 19, 2024 at 10:46:53AM -0500, Brian Foster wrote:
> To facilitate batch processing of dirty folios for zero range, tweak
> the write begin path to allow the caller to optionally pass in its
> own folio/pos combination.
> 
> Signed-off-by: Brian Foster <bfoster@redhat.com>
> ---
>  fs/iomap/buffered-io.c | 20 +++++++++++++-------
>  1 file changed, 13 insertions(+), 7 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index ce73d2a48c1e..d1a86aea1a7a 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -781,7 +781,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
>  {
>  	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
>  	const struct iomap *srcmap = iomap_iter_srcmap(iter);
> -	struct folio *folio;
> +	struct folio *folio = *foliop;
>  	int status = 0;
>  
>  	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
> @@ -794,9 +794,15 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
>  	if (!mapping_large_folio_support(iter->inode->i_mapping))
>  		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
>  
> -	folio = __iomap_get_folio(iter, pos, len);
> -	if (IS_ERR(folio))
> -		return PTR_ERR(folio);
> +	/*
> +	 * XXX: Might want to plumb batch handling down through here. For now
> +	 * let the caller do it.

Yeah, plumbing in the batch here would be nicer.

I suspect doing batch processing might actually be a neat thing for
the normal write path as well.

Brian Foster Nov. 20, 2024, 2:29 p.m. UTC | #2
On Wed, Nov 20, 2024 at 12:38:41AM -0800, Christoph Hellwig wrote:
> On Tue, Nov 19, 2024 at 10:46:53AM -0500, Brian Foster wrote:
> > To facilitate batch processing of dirty folios for zero range, tweak
> > the write begin path to allow the caller to optionally pass in its
> > own folio/pos combination.
> > 
> > Signed-off-by: Brian Foster <bfoster@redhat.com>
> > ---
> >  fs/iomap/buffered-io.c | 20 +++++++++++++-------
> >  1 file changed, 13 insertions(+), 7 deletions(-)
> > 
> > diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> > index ce73d2a48c1e..d1a86aea1a7a 100644
> > --- a/fs/iomap/buffered-io.c
> > +++ b/fs/iomap/buffered-io.c
> > @@ -781,7 +781,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
> >  {
> >  	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
> >  	const struct iomap *srcmap = iomap_iter_srcmap(iter);
> > -	struct folio *folio;
> > +	struct folio *folio = *foliop;
> >  	int status = 0;
> >  
> >  	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
> > @@ -794,9 +794,15 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
> >  	if (!mapping_large_folio_support(iter->inode->i_mapping))
> >  		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
> >  
> > -	folio = __iomap_get_folio(iter, pos, len);
> > -	if (IS_ERR(folio))
> > -		return PTR_ERR(folio);
> > +	/*
> > +	 * XXX: Might want to plumb batch handling down through here. For now
> > +	 * let the caller do it.
> 
> Yeah, plumbing in the batch here would be nicer.
> 

Ok, I'll take a closer look at that. IIRC I punted on it initially
because we'll also have to fix up pos/len down in this path when the
next folio is not contiguous. It was easier to punt and just get the
basics working first.

Brian

> I suspect doing batch processing might actually be a neat thing for
> the normal write path as well.
>
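
To make that concrete, here is one possible shape for plumbing the batch
down into iomap_write_begin() itself, including the pos/len fixup Brian
mentions for when the next folio is not contiguous. This is a speculative
sketch, not the posted series: the iter->fbatch field, the pointer-based
pos/len parameters, and the assumption that the batch was filled from
within [pos, pos + len) are all made up for illustration.

	static int iomap_write_begin(struct iomap_iter *iter, loff_t *pos,
			size_t *len, struct folio **foliop)
	{
		struct folio *folio;

		if (iter->fbatch) {
			/* Batch mode: consume the next dirty folio directly. */
			folio = folio_batch_next(iter->fbatch);
			if (!folio) {
				*foliop = NULL;	/* batch exhausted, caller stops */
				return 0;
			}
			folio_get(folio);
			folio_lock(folio);

			/*
			 * The next dirty folio may not be contiguous with the
			 * current position: skip pos ahead to the folio and
			 * clamp len so this iteration stays within it.
			 */
			if (folio_pos(folio) > *pos) {
				*len -= folio_pos(folio) - *pos;
				*pos = folio_pos(folio);
			}
			*len = min_t(u64, *len,
				folio_size(folio) - offset_in_folio(folio, *pos));
		} else {
			/* Normal mode: look up (and lock) the folio as today. */
			folio = __iomap_get_folio(iter, *pos, *len);
			if (IS_ERR(folio))
				return PTR_ERR(folio);
		}

		*foliop = folio;
		return 0;
	}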

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ce73d2a48c1e..d1a86aea1a7a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -781,7 +781,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 {
 	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	struct folio *folio;
+	struct folio *folio = *foliop;
 	int status = 0;
 
 	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -794,9 +794,15 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	if (!mapping_large_folio_support(iter->inode->i_mapping))
 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-	folio = __iomap_get_folio(iter, pos, len);
-	if (IS_ERR(folio))
-		return PTR_ERR(folio);
+	/*
+	 * XXX: Might want to plumb batch handling down through here. For now
+	 * let the caller do it.
+	 */
+	if (!folio) {
+		folio = __iomap_get_folio(iter, pos, len);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
+	}
 
 	/*
 	 * Now we have a locked folio, before we do anything with it we need to
@@ -918,7 +924,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
 
 	do {
-		struct folio *folio;
+		struct folio *folio = NULL;
 		loff_t old_size;
 		size_t offset;		/* Offset into folio */
 		size_t bytes;		/* Bytes to write to folio */
@@ -1281,7 +1287,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		return length;
 
 	do {
-		struct folio *folio;
+		struct folio *folio = NULL;
 		int status;
 		size_t offset;
 		size_t bytes = min_t(u64, SIZE_MAX, length);
@@ -1385,7 +1391,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
 	}
 
 	do {
-		struct folio *folio;
+		struct folio *folio = NULL;
 		int status;
 		size_t offset;
 		size_t bytes = min_t(u64, SIZE_MAX, length);