
[12/13] fuse: convert direct io to use folios

Message ID 20241002165253.3872513-13-joannelkoong@gmail.com
State New
Series fuse: use folios instead of pages for requests

Commit Message

Joanne Koong Oct. 2, 2024, 4:52 p.m. UTC
Convert direct io requests to use folios instead of pages.

No functional changes.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
---
 fs/fuse/file.c | 88 ++++++++++++++++++++++----------------------------
 1 file changed, 38 insertions(+), 50 deletions(-)
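
For readers following the diff below: the heart of the fuse_get_user_pages()
change is that there is no iov_iter_extract_folios() yet, so the patch keeps
extracting pages with iov_iter_extract_pages() and then records the containing
folio of each page via page_folio(). A minimal sketch of that pattern, assuming
order-0 (single-page) folios; the helper name and exact shape here are
illustrative, not code from the patch:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

/*
 * Sketch only: pull pages out of an iov_iter and remember the folio that
 * backs each one.  The caller is responsible for releasing them later
 * (e.g. with unpin_folio() when iov_iter_extract_will_pin() is true).
 */
static ssize_t sketch_extract_folios(struct iov_iter *ii, struct folio **folios,
				     unsigned int max_folios, size_t maxsize,
				     size_t *start)
{
	struct page **pages;
	ssize_t copied;
	unsigned int i, n;

	pages = kcalloc(max_folios, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Extract (and pin, for user memory) up to max_folios pages. */
	copied = iov_iter_extract_pages(ii, &pages, maxsize, max_folios, 0, start);
	if (copied > 0) {
		/* Same accounting as the patch: bytes plus leading offset, in pages. */
		n = DIV_ROUND_UP(copied + *start, PAGE_SIZE);
		for (i = 0; i < n; i++)
			folios[i] = page_folio(pages[i]);
	}
	kfree(pages);
	return copied;
}

The patch does the equivalent inside fuse_get_user_pages(), writing into
ap->folios[] and ap->folio_descs[] and looping until the request is full.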

Comments

Josef Bacik Oct. 18, 2024, 8:02 p.m. UTC | #1
On Wed, Oct 02, 2024 at 09:52:52AM -0700, Joanne Koong wrote:
> Convert direct io requests to use folios instead of pages.
> 
> No functional changes.
> 
> Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> ---
>  fs/fuse/file.c | 88 ++++++++++++++++++++++----------------------------
>  1 file changed, 38 insertions(+), 50 deletions(-)
> 
> diff --git a/fs/fuse/file.c b/fs/fuse/file.c
> index 1fa870fb3cc4..38ed9026f286 100644
> --- a/fs/fuse/file.c
> +++ b/fs/fuse/file.c
> @@ -665,11 +665,11 @@ static void fuse_release_user_pages(struct fuse_args_pages *ap,
>  {
>  	unsigned int i;
>  
> -	for (i = 0; i < ap->num_pages; i++) {
> +	for (i = 0; i < ap->num_folios; i++) {
>  		if (should_dirty)
> -			set_page_dirty_lock(ap->pages[i]);
> +			folio_mark_dirty_lock(ap->folios[i]);
>  		if (ap->args.is_pinned)
> -			unpin_user_page(ap->pages[i]);
> +			unpin_folio(ap->folios[i]);
>  	}
>  }
>  
> @@ -739,24 +739,6 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
>  	kref_put(&io->refcnt, fuse_io_release);
>  }
>  
> -static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
> -					  unsigned int npages)
> -{
> -	struct fuse_io_args *ia;
> -
> -	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
> -	if (ia) {
> -		ia->io = io;
> -		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
> -						&ia->ap.descs);
> -		if (!ia->ap.pages) {
> -			kfree(ia);
> -			ia = NULL;
> -		}
> -	}
> -	return ia;
> -}
> -
>  static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io,
>  						 unsigned int nfolios)
>  {
> @@ -776,12 +758,6 @@ static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io,
>  	return ia;
>  }
>  
> -static void fuse_io_free(struct fuse_io_args *ia)
> -{
> -	kfree(ia->ap.pages);
> -	kfree(ia);
> -}
> -
>  static void fuse_io_folios_free(struct fuse_io_args *ia)
>  {
>  	kfree(ia->ap.folios);
> @@ -814,7 +790,7 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
>  	}
>  
>  	fuse_aio_complete(io, err, pos);
> -	fuse_io_free(ia);
> +	fuse_io_folios_free(ia);
>  }
>  
>  static ssize_t fuse_async_req_send(struct fuse_mount *fm,
> @@ -1518,10 +1494,11 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
>  
>  static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
>  			       size_t *nbytesp, int write,
> -			       unsigned int max_pages)
> +			       unsigned int max_folios)
>  {
>  	size_t nbytes = 0;  /* # bytes already packed in req */
>  	ssize_t ret = 0;
> +	ssize_t i = 0;
>  
>  	/* Special case for kernel I/O: can copy directly into the buffer */
>  	if (iov_iter_is_kvec(ii)) {
> @@ -1538,15 +1515,23 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
>  		return 0;
>  	}
>  
> -	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
> -		unsigned npages;
> +	/*
> +	 * Until there is support for iov_iter_extract_folios(), we have to
> +	 * manually extract pages using iov_iter_extract_pages() and then
> +	 * copy that to a folios array.
> +	 */
> +	struct page **pages = kzalloc((max_folios - ap->num_folios) * sizeof(struct page *),
> +				      GFP_KERNEL);
> +	if (!pages)
> +		return -ENOMEM;
> +
> +	while (nbytes < *nbytesp && ap->num_folios < max_folios) {
> +		unsigned nfolios;
>  		size_t start;
> -		struct page **pt_pages;
>  
> -		pt_pages = &ap->pages[ap->num_pages];
> -		ret = iov_iter_extract_pages(ii, &pt_pages,
> +		ret = iov_iter_extract_pages(ii, &pages,
>  					     *nbytesp - nbytes,
> -					     max_pages - ap->num_pages,
> +					     max_folios - ap->num_folios,
>  					     0, &start);
>  		if (ret < 0)
>  			break;
> @@ -1554,15 +1539,18 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
>  		nbytes += ret;
>  
>  		ret += start;
> -		npages = DIV_ROUND_UP(ret, PAGE_SIZE);
> +		nfolios = DIV_ROUND_UP(ret, PAGE_SIZE);
>  
> -		ap->descs[ap->num_pages].offset = start;
> -		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
> +		ap->folio_descs[ap->num_folios].offset = start;
> +		fuse_folio_descs_length_init(ap->folio_descs, ap->num_folios, nfolios);

With this conversion, fuse_page_descs_length_init now has no users, so I'd add a
follow-up patch at the end of the series to remove it.  Thanks,

Josef
Joanne Koong Oct. 21, 2024, 10:02 p.m. UTC | #2
On Fri, Oct 18, 2024 at 1:02 PM Josef Bacik <josef@toxicpanda.com> wrote:
>
> On Wed, Oct 02, 2024 at 09:52:52AM -0700, Joanne Koong wrote:
> > [ full quoted patch trimmed; see the patch at the bottom of this page ]
>
> With this conversion, fuse_page_descs_length_init now has no users, so I'd add a
> follow-up patch at the end of the series to remove it.  Thanks,

Great catch. Thanks for reviewing these patches. I agree with your
other comments as well and will make sure to address those for v2.

Thanks,
Joanne
>
> Josef
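
For reference, the helper discussed above is roughly the following (shape
recalled from fs/fuse/fuse_i.h around the time of this series, so the exact
location and signature may differ); the suggested follow-up patch would simply
delete it once nothing calls it:

/* Per-page descriptor and the length initializer that loses its last
 * caller with this patch (approximate, for context only). */
struct fuse_page_desc {
	unsigned int length;
	unsigned int offset;
};

static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
					       unsigned int index,
					       unsigned int nr_pages)
{
	unsigned int i;

	for (i = index; i < index + nr_pages; i++)
		descs[i].length = PAGE_SIZE - descs[i].offset;
}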

Patch

diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 1fa870fb3cc4..38ed9026f286 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -665,11 +665,11 @@  static void fuse_release_user_pages(struct fuse_args_pages *ap,
 {
 	unsigned int i;
 
-	for (i = 0; i < ap->num_pages; i++) {
+	for (i = 0; i < ap->num_folios; i++) {
 		if (should_dirty)
-			set_page_dirty_lock(ap->pages[i]);
+			folio_mark_dirty_lock(ap->folios[i]);
 		if (ap->args.is_pinned)
-			unpin_user_page(ap->pages[i]);
+			unpin_folio(ap->folios[i]);
 	}
 }
 
@@ -739,24 +739,6 @@  static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
 	kref_put(&io->refcnt, fuse_io_release);
 }
 
-static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
-					  unsigned int npages)
-{
-	struct fuse_io_args *ia;
-
-	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
-	if (ia) {
-		ia->io = io;
-		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
-						&ia->ap.descs);
-		if (!ia->ap.pages) {
-			kfree(ia);
-			ia = NULL;
-		}
-	}
-	return ia;
-}
-
 static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io,
 						 unsigned int nfolios)
 {
@@ -776,12 +758,6 @@  static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io,
 	return ia;
 }
 
-static void fuse_io_free(struct fuse_io_args *ia)
-{
-	kfree(ia->ap.pages);
-	kfree(ia);
-}
-
 static void fuse_io_folios_free(struct fuse_io_args *ia)
 {
 	kfree(ia->ap.folios);
@@ -814,7 +790,7 @@  static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
 	}
 
 	fuse_aio_complete(io, err, pos);
-	fuse_io_free(ia);
+	fuse_io_folios_free(ia);
 }
 
 static ssize_t fuse_async_req_send(struct fuse_mount *fm,
@@ -1518,10 +1494,11 @@  static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
 
 static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 			       size_t *nbytesp, int write,
-			       unsigned int max_pages)
+			       unsigned int max_folios)
 {
 	size_t nbytes = 0;  /* # bytes already packed in req */
 	ssize_t ret = 0;
+	ssize_t i = 0;
 
 	/* Special case for kernel I/O: can copy directly into the buffer */
 	if (iov_iter_is_kvec(ii)) {
@@ -1538,15 +1515,23 @@  static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 		return 0;
 	}
 
-	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
-		unsigned npages;
+	/*
+	 * Until there is support for iov_iter_extract_folios(), we have to
+	 * manually extract pages using iov_iter_extract_pages() and then
+	 * copy that to a folios array.
+	 */
+	struct page **pages = kzalloc((max_folios - ap->num_folios) * sizeof(struct page *),
+				      GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	while (nbytes < *nbytesp && ap->num_folios < max_folios) {
+		unsigned nfolios;
 		size_t start;
-		struct page **pt_pages;
 
-		pt_pages = &ap->pages[ap->num_pages];
-		ret = iov_iter_extract_pages(ii, &pt_pages,
+		ret = iov_iter_extract_pages(ii, &pages,
 					     *nbytesp - nbytes,
-					     max_pages - ap->num_pages,
+					     max_folios - ap->num_folios,
 					     0, &start);
 		if (ret < 0)
 			break;
@@ -1554,15 +1539,18 @@  static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 		nbytes += ret;
 
 		ret += start;
-		npages = DIV_ROUND_UP(ret, PAGE_SIZE);
+		nfolios = DIV_ROUND_UP(ret, PAGE_SIZE);
 
-		ap->descs[ap->num_pages].offset = start;
-		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
+		ap->folio_descs[ap->num_folios].offset = start;
+		fuse_folio_descs_length_init(ap->folio_descs, ap->num_folios, nfolios);
+		for (i = 0; i < nfolios; i++)
+			ap->folios[i + ap->num_folios] = page_folio(pages[i]);
 
-		ap->num_pages += npages;
-		ap->descs[ap->num_pages - 1].length -=
+		ap->num_folios += nfolios;
+		ap->folio_descs[ap->num_folios - 1].length -=
 			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
 	}
+	kfree(pages);
 
 	ap->args.is_pinned = iov_iter_extract_will_pin(ii);
 	ap->args.user_pages = true;
@@ -1594,18 +1582,18 @@  ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 	ssize_t res = 0;
 	int err = 0;
 	struct fuse_io_args *ia;
-	unsigned int max_pages;
+	unsigned int max_folios;
 	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
 
-	max_pages = iov_iter_npages(iter, fc->max_pages);
-	ia = fuse_io_alloc(io, max_pages);
+	max_folios = iov_iter_npages(iter, fc->max_pages);
+	ia = fuse_io_folios_alloc(io, max_folios);
 	if (!ia)
 		return -ENOMEM;
 
 	if (fopen_direct_io && fc->direct_io_allow_mmap) {
 		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
 		if (res) {
-			fuse_io_free(ia);
+			fuse_io_folios_free(ia);
 			return res;
 		}
 	}
@@ -1620,7 +1608,7 @@  ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 	if (fopen_direct_io && write) {
 		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
 		if (res) {
-			fuse_io_free(ia);
+			fuse_io_folios_free(ia);
 			return res;
 		}
 	}
@@ -1632,7 +1620,7 @@  ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		size_t nbytes = min(count, nmax);
 
 		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
-					  max_pages);
+					  max_folios);
 		if (err && !nbytes)
 			break;
 
@@ -1647,7 +1635,7 @@  ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 
 		if (!io->async || nres < 0) {
 			fuse_release_user_pages(&ia->ap, io->should_dirty);
-			fuse_io_free(ia);
+			fuse_io_folios_free(ia);
 		}
 		ia = NULL;
 		if (nres < 0) {
@@ -1665,14 +1653,14 @@  ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 			break;
 		}
 		if (count) {
-			max_pages = iov_iter_npages(iter, fc->max_pages);
-			ia = fuse_io_alloc(io, max_pages);
+			max_folios = iov_iter_npages(iter, fc->max_pages);
+			ia = fuse_io_folios_alloc(io, max_folios);
 			if (!ia)
 				break;
 		}
 	}
 	if (ia)
-		fuse_io_free(ia);
+		fuse_io_folios_free(ia);
 	if (res > 0)
 		*ppos = pos;