
[v7,07/11] iomap: fix iomap_dio_zero() for fs bs > system page size

Message ID 20240607145902.1137853-8-kernel@pankajraghav.com (mailing list archive)
State Superseded, archived
Series: enable bs > ps in XFS

Commit Message

Pankaj Raghav (Samsung) June 7, 2024, 2:58 p.m. UTC
From: Pankaj Raghav <p.raghav@samsung.com>

iomap_dio_zero() will pad a fs block with zeroes if the direct IO size
is less than the fs block size. iomap_dio_zero() has an implicit
assumption that the fs block size is less than the page size. This is
true for most filesystems at the moment.

If the block size is larger than the page size, this will send the
contents of the page next to the zero page (as len > PAGE_SIZE) to the
underlying block device, causing FS corruption.

iomap is a generic infrastructure and it should not make any
assumptions about the fs block size and the page size of the system.
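
To make the failure mode concrete, here is a minimal sketch of the
problematic path (simplified from iomap_dio_zero() below; the 16k block
size and 4k page size are only example numbers):

	/* old code, simplified: ZERO_PAGE(0) is a single system page */
	struct page *page = ZERO_PAGE(0);

	/*
	 * With a 16k fs block on a 4k-page system, len can be up to 16k,
	 * so this bvec covers 12k of whatever happens to sit behind the
	 * zero page in physical memory, and that data is written to disk.
	 */
	__bio_add_page(bio, page, len, 0);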

Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
 fs/internal.h          |  5 +++++
 fs/iomap/buffered-io.c |  6 ++++++
 fs/iomap/direct-io.c   | 26 ++++++++++++++++++++++++--
 3 files changed, 35 insertions(+), 2 deletions(-)

Comments

John Garry June 11, 2024, 7:38 a.m. UTC | #1
On 07/06/2024 15:58, Pankaj Raghav (Samsung) wrote:
> From: Pankaj Raghav <p.raghav@samsung.com>
> 
> iomap_dio_zero() will pad a fs block with zeroes if the direct IO size
> is less than the fs block size. iomap_dio_zero() has an implicit
> assumption that the fs block size is less than the page size. This is
> true for most filesystems at the moment.
> 
> If the block size is larger than the page size, this will send the
> contents of the page next to the zero page (as len > PAGE_SIZE) to the
> underlying block device, causing FS corruption.
> 
> iomap is a generic infrastructure and it should not make any
> assumptions about the fs block size and the page size of the system.
> 
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
> Reviewed-by: Hannes Reinecke <hare@suse.de>
> ---
>   fs/internal.h          |  5 +++++
>   fs/iomap/buffered-io.c |  6 ++++++
>   fs/iomap/direct-io.c   | 26 ++++++++++++++++++++++++--
>   3 files changed, 35 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/internal.h b/fs/internal.h
> index 84f371193f74..30217f0ff4c6 100644
> --- a/fs/internal.h
> +++ b/fs/internal.h
> @@ -35,6 +35,11 @@ static inline void bdev_cache_init(void)
>   int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
>   		get_block_t *get_block, const struct iomap *iomap);
>   
> +/*
> + * iomap/direct-io.c
> + */
> +int iomap_dio_init(void);
> +
>   /*
>    * char_dev.c
>    */
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 49938419fcc7..9f791db473e4 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1990,6 +1990,12 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
>   
>   static int __init iomap_init(void)
>   {
> +	int ret;
> +
> +	ret = iomap_dio_init();
> +	if (ret)
> +		return ret;
> +
>   	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
>   			   offsetof(struct iomap_ioend, io_bio),
>   			   BIOSET_NEED_BVECS);

I suppose that it does not matter that zero_fs_block is leaked if this 
fails (or is it even leaked?), as I don't think that failing that 
bioset_init() call is handled at all.

> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index f3b43d223a46..b95600b254a3 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
> @@ -27,6 +27,13 @@
>   #define IOMAP_DIO_WRITE		(1U << 30)
>   #define IOMAP_DIO_DIRTY		(1U << 31)
>   
> +/*
> + * Used for sub block zeroing in iomap_dio_zero()
> + */
> +#define ZERO_FSB_SIZE (65536)
> +#define ZERO_FSB_ORDER (get_order(ZERO_FSB_SIZE))
> +static struct page *zero_fs_block;
> +
>   struct iomap_dio {
>   	struct kiocb		*iocb;
>   	const struct iomap_dio_ops *dops;
> @@ -52,6 +59,16 @@ struct iomap_dio {
>   	};
>   };
>   
> +int iomap_dio_init(void)
> +{
> +	zero_fs_block = alloc_pages(GFP_KERNEL | __GFP_ZERO, ZERO_FSB_ORDER);
> +
> +	if (!zero_fs_block)
> +		return -ENOMEM;
> +
> +	return 0;
> +}
> +
>   static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
>   		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
>   {
> @@ -236,17 +253,22 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
>   		loff_t pos, unsigned len)
>   {
>   	struct inode *inode = file_inode(dio->iocb->ki_filp);
> -	struct page *page = ZERO_PAGE(0);
>   	struct bio *bio;
>   
> +	/*
> +	 * Max block size supported is 64k
> +	 */
> +	WARN_ON_ONCE(len > ZERO_FSB_SIZE);

JFYI, as mentioned in
https://lore.kernel.org/linux-xfs/20240429174746.2132161-1-john.g.garry@oracle.com/T/#m5354e2b2531a5552a8b8acd4a95342ed4d7500f2,
we would like to support an arbitrary size. Maybe I will need to loop
for zeroing sizes > 64K.
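
Something like this, maybe (untested, and iomap_dio_zero_range() is
just a name I made up):

	static void iomap_dio_zero_range(const struct iomap_iter *iter,
			struct iomap_dio *dio, loff_t pos, u64 len)
	{
		/* cap each sub-block zeroing bio at the 64k buffer size */
		while (len) {
			unsigned int io_len = min_t(u64, len, ZERO_FSB_SIZE);

			iomap_dio_zero(iter, dio, pos, io_len);
			pos += io_len;
			len -= io_len;
		}
	}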

> +
>   	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
>   	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
>   				  GFP_KERNEL);
> +
>   	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
>   	bio->bi_private = dio;
>   	bio->bi_end_io = iomap_dio_bio_end_io;
>   
> -	__bio_add_page(bio, page, len, 0);
> +	__bio_add_page(bio, zero_fs_block, len, 0);
>   	iomap_dio_submit_bio(iter, dio, bio, pos);
>   }
>
Pankaj Raghav (Samsung) June 11, 2024, 9:41 a.m. UTC | #2
> > index 49938419fcc7..9f791db473e4 100644
> > --- a/fs/iomap/buffered-io.c
> > +++ b/fs/iomap/buffered-io.c
> > @@ -1990,6 +1990,12 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
> >   static int __init iomap_init(void)
> >   {
> > +	int ret;
> > +
> > +	ret = iomap_dio_init();
> > +	if (ret)
> > +		return ret;
> > +
> >   	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
> >   			   offsetof(struct iomap_ioend, io_bio),
> >   			   BIOSET_NEED_BVECS);
> 
> I suppose that it does not matter that zero_fs_block is leaked if this fails
> (or is it even leaked?), as I don't think that failing that bioset_init()
> call is handled at all.

If bioset_init() fails, then we have bigger problems than just a leaked
64k of memory? ;)

Do you have something like this in mind?

diff --git a/fs/internal.h b/fs/internal.h
index 30217f0ff4c6..def96c7ed9ea 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -39,6 +39,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
  * iomap/direct-io.c
  */
 int iomap_dio_init(void);
+void iomap_dio_exit(void);
 
 /*
  * char_dev.c
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 9f791db473e4..8d8b9e62201f 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1994,10 +1994,16 @@ static int __init iomap_init(void)
 
        ret = iomap_dio_init();
        if (ret)
-               return ret;
+               goto out;
 
-       return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+       ret = bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
                           offsetof(struct iomap_ioend, io_bio),
                           BIOSET_NEED_BVECS);
+       if (!ret)
+               goto out;
+
+       iomap_dio_exit();
+out:
+       return ret;
 }
 fs_initcall(iomap_init);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b95600b254a3..f4c9445ca50d 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -69,6 +69,11 @@ int iomap_dio_init(void)
        return 0;
 }
 
+void iomap_dio_exit(void)
+{
+       __free_pages(zero_fs_block, ZERO_FSB_ORDER);
+}
+
 static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
                struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
 {

> 
> > +
> >   static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
> >   		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
> >   {
> > @@ -236,17 +253,22 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
> >   		loff_t pos, unsigned len)
> >   {
> >   	struct inode *inode = file_inode(dio->iocb->ki_filp);
> > -	struct page *page = ZERO_PAGE(0);
> >   	struct bio *bio;
> > +	/*
> > +	 * Max block size supported is 64k
> > +	 */
> > +	WARN_ON_ONCE(len > ZERO_FSB_SIZE);
> 
> JFYI, as mentioned in https://lore.kernel.org/linux-xfs/20240429174746.2132161-1-john.g.garry@oracle.com/T/#m5354e2b2531a5552a8b8acd4a95342ed4d7500f2,
> we would like to support an arbitrary size. Maybe I will need to loop for
> zeroing sizes > 64K.

The initial patches looped with ZERO_PAGE(0), but the early feedback
was to use a huge zero page. When I discussed that at LSF, people
thought we would be using a lot of memory for sub-block zeroing,
especially on architectures with a 64k base page size.

So for now, a good tradeoff between memory usage and efficiency was to
use a 64k buffer, as that is the maximum FSB size we support.[1]
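
(For what it's worth, the cost of that buffer follows directly from
get_order():

	/* ZERO_FSB_ORDER = get_order(65536): */
	/*  4k PAGE_SIZE -> order 4, 16 pages, one 64k allocation */
	/* 16k PAGE_SIZE -> order 2,  4 pages */
	/* 64k PAGE_SIZE -> order 0,  1 page  */

so on 64k-page machines it collapses to a single page anyway.)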

IIUC, you will also be using this function to zero out the extent, and
not just an FSB?

I think we could resort to looping until we have a way to request
arbitrary zero folios without having to allocate them in
iomap_dio_alloc_bio() for every IO.
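
For reference, the early versions did roughly the following (a sketch
from memory, not the exact code; the bio would also need enough bvecs
allocated up front for all the chunks):

	while (len) {
		unsigned int io_len = min_t(unsigned int, len, PAGE_SIZE);

		/* never let a single bvec outgrow the zero page */
		__bio_add_page(bio, ZERO_PAGE(0), io_len, 0);
		len -= io_len;
	}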

[1] https://lore.kernel.org/linux-xfs/20240529134509.120826-8-kernel@pankajraghav.com/

--
Pankaj
John Garry June 11, 2024, 10 a.m. UTC | #3
On 11/06/2024 10:41, Pankaj Raghav (Samsung) wrote:
>>> index 49938419fcc7..9f791db473e4 100644
>>> --- a/fs/iomap/buffered-io.c
>>> +++ b/fs/iomap/buffered-io.c
>>> @@ -1990,6 +1990,12 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
>>>    static int __init iomap_init(void)
>>>    {
>>> +	int ret;
>>> +
>>> +	ret = iomap_dio_init();
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>    	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
>>>    			   offsetof(struct iomap_ioend, io_bio),
>>>    			   BIOSET_NEED_BVECS);
>> I suppose that it does not matter that zero_fs_block is leaked if this fails
>> (or is it even leaked?), as I don't think that failing that bioset_init()
>> call is handled at all.
> If bioset_init() fails, then we have bigger problems than just a leaked
> 64k of memory? ;)
Darrick J. Wong June 12, 2024, 8:40 p.m. UTC | #4
On Fri, Jun 07, 2024 at 02:58:58PM +0000, Pankaj Raghav (Samsung) wrote:
> From: Pankaj Raghav <p.raghav@samsung.com>
> 
> iomap_dio_zero() will pad a fs block with zeroes if the direct IO size
> is less than the fs block size. iomap_dio_zero() has an implicit
> assumption that the fs block size is less than the page size. This is
> true for most filesystems at the moment.
> 
> If the block size is larger than the page size, this will send the
> contents of the page next to the zero page (as len > PAGE_SIZE) to the
> underlying block device, causing FS corruption.
> 
> iomap is a generic infrastructure and it should not make any
> assumptions about the fs block size and the page size of the system.
> 
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
> Reviewed-by: Hannes Reinecke <hare@suse.de>
> ---
>  fs/internal.h          |  5 +++++
>  fs/iomap/buffered-io.c |  6 ++++++
>  fs/iomap/direct-io.c   | 26 ++++++++++++++++++++++++--
>  3 files changed, 35 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/internal.h b/fs/internal.h
> index 84f371193f74..30217f0ff4c6 100644
> --- a/fs/internal.h
> +++ b/fs/internal.h
> @@ -35,6 +35,11 @@ static inline void bdev_cache_init(void)
>  int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
>  		get_block_t *get_block, const struct iomap *iomap);
>  
> +/*
> + * iomap/direct-io.c
> + */
> +int iomap_dio_init(void);
> +
>  /*
>   * char_dev.c
>   */
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 49938419fcc7..9f791db473e4 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1990,6 +1990,12 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
>  
>  static int __init iomap_init(void)
>  {
> +	int ret;
> +
> +	ret = iomap_dio_init();
> +	if (ret)
> +		return ret;
> +
>  	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
>  			   offsetof(struct iomap_ioend, io_bio),
>  			   BIOSET_NEED_BVECS);
> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index f3b43d223a46..b95600b254a3 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
> @@ -27,6 +27,13 @@
>  #define IOMAP_DIO_WRITE		(1U << 30)
>  #define IOMAP_DIO_DIRTY		(1U << 31)
>  
> +/*
> + * Used for sub block zeroing in iomap_dio_zero()
> + */
> +#define ZERO_FSB_SIZE (65536)
> +#define ZERO_FSB_ORDER (get_order(ZERO_FSB_SIZE))
> +static struct page *zero_fs_block;

Er... zero_page_64k?

Since it's a permanent allocation, can we also mark the memory ro?

> +
>  struct iomap_dio {
>  	struct kiocb		*iocb;
>  	const struct iomap_dio_ops *dops;
> @@ -52,6 +59,16 @@ struct iomap_dio {
>  	};
>  };
>  
> +int iomap_dio_init(void)
> +{
> +	zero_fs_block = alloc_pages(GFP_KERNEL | __GFP_ZERO, ZERO_FSB_ORDER);
> +
> +	if (!zero_fs_block)
> +		return -ENOMEM;
> +
> +	return 0;
> +}

Can't we just turn this into another fs_initcall() instead of exporting
it just so we can call it from iomap_init?  And maybe rename the
existing iomap_init to iomap_pagecache_init or something, for clarity's
sake?

--D

> +
>  static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
>  		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
>  {
> @@ -236,17 +253,22 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
>  		loff_t pos, unsigned len)
>  {
>  	struct inode *inode = file_inode(dio->iocb->ki_filp);
> -	struct page *page = ZERO_PAGE(0);
>  	struct bio *bio;
>  
> +	/*
> +	 * Max block size supported is 64k
> +	 */
> +	WARN_ON_ONCE(len > ZERO_FSB_SIZE);
> +
>  	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
>  	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
>  				  GFP_KERNEL);
> +
>  	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
>  	bio->bi_private = dio;
>  	bio->bi_end_io = iomap_dio_bio_end_io;
>  
> -	__bio_add_page(bio, page, len, 0);
> +	__bio_add_page(bio, zero_fs_block, len, 0);
>  	iomap_dio_submit_bio(iter, dio, bio, pos);
>  }
>  
> -- 
> 2.44.1
> 
>
Pankaj Raghav (Samsung) June 17, 2024, 3:08 p.m. UTC | #5
On Wed, Jun 12, 2024 at 01:40:25PM -0700, Darrick J. Wong wrote:
> On Fri, Jun 07, 2024 at 02:58:58PM +0000, Pankaj Raghav (Samsung) wrote:
> > From: Pankaj Raghav <p.raghav@samsung.com>
> > 
> > diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> > index f3b43d223a46..b95600b254a3 100644
> > --- a/fs/iomap/direct-io.c
> > +++ b/fs/iomap/direct-io.c
> > @@ -27,6 +27,13 @@
> >  #define IOMAP_DIO_WRITE		(1U << 30)
> >  #define IOMAP_DIO_DIRTY		(1U << 31)
> >  
> > +/*
> > + * Used for sub block zeroing in iomap_dio_zero()
> > + */
> > +#define ZERO_FSB_SIZE (65536)
> > +#define ZERO_FSB_ORDER (get_order(ZERO_FSB_SIZE))
> > +static struct page *zero_fs_block;
> 
> Er... zero_page_64k?
> 
> Since it's a permanent allocation, can we also mark the memory ro?

Sounds good.
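
Something like this on top, I guess (a sketch, assuming set_memory_ro()
is available here, and taking the zero_page_64k rename):

	#include <asm/set_memory.h>

	/* after the successful alloc_pages() in iomap_dio_init(): */
	set_memory_ro((unsigned long)page_address(zero_page_64k),
		      1 << ZERO_FSB_ORDER);
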
> 
> > +
> >  struct iomap_dio {
> >  	struct kiocb		*iocb;
> >  	const struct iomap_dio_ops *dops;
> > @@ -52,6 +59,16 @@ struct iomap_dio {
> >  	};
> >  };
> >  
> > +int iomap_dio_init(void)
> > +{
> > +	zero_fs_block = alloc_pages(GFP_KERNEL | __GFP_ZERO, ZERO_FSB_ORDER);
> > +
> > +	if (!zero_fs_block)
> > +		return -ENOMEM;
> > +
> > +	return 0;
> > +}
> 
> Can't we just turn this into another fs_initcall() instead of exporting
> it just so we can call it from iomap_init?  And maybe rename the
> existing iomap_init to iomap_pagecache_init or something, for clarity's
> sake?

Yeah, probably iomap_pagecache_init() in fs/iomap/buffered-io.c and
iomap_dio_init() in fs/iomap/direct-io.c.
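
i.e. something like this sketch in fs/iomap/direct-io.c, which also
lets the iomap_dio_init() declaration in fs/internal.h go away:

	static int __init iomap_dio_init(void)
	{
		zero_page_64k = alloc_pages(GFP_KERNEL | __GFP_ZERO,
					    ZERO_FSB_ORDER);
		if (!zero_page_64k)
			return -ENOMEM;

		/* read-only marking as discussed above */
		set_memory_ro((unsigned long)page_address(zero_page_64k),
			      1 << ZERO_FSB_ORDER);
		return 0;
	}
	fs_initcall(iomap_dio_init);
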
> 
> --D
>

Patch

diff --git a/fs/internal.h b/fs/internal.h
index 84f371193f74..30217f0ff4c6 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -35,6 +35,11 @@  static inline void bdev_cache_init(void)
 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap);
 
+/*
+ * iomap/direct-io.c
+ */
+int iomap_dio_init(void);
+
 /*
  * char_dev.c
  */
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 49938419fcc7..9f791db473e4 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1990,6 +1990,12 @@  EXPORT_SYMBOL_GPL(iomap_writepages);
 
 static int __init iomap_init(void)
 {
+	int ret;
+
+	ret = iomap_dio_init();
+	if (ret)
+		return ret;
+
 	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
 			   offsetof(struct iomap_ioend, io_bio),
 			   BIOSET_NEED_BVECS);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index f3b43d223a46..b95600b254a3 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -27,6 +27,13 @@ 
 #define IOMAP_DIO_WRITE		(1U << 30)
 #define IOMAP_DIO_DIRTY		(1U << 31)
 
+/*
+ * Used for sub block zeroing in iomap_dio_zero()
+ */
+#define ZERO_FSB_SIZE (65536)
+#define ZERO_FSB_ORDER (get_order(ZERO_FSB_SIZE))
+static struct page *zero_fs_block;
+
 struct iomap_dio {
 	struct kiocb		*iocb;
 	const struct iomap_dio_ops *dops;
@@ -52,6 +59,16 @@  struct iomap_dio {
 	};
 };
 
+int iomap_dio_init(void)
+{
+	zero_fs_block = alloc_pages(GFP_KERNEL | __GFP_ZERO, ZERO_FSB_ORDER);
+
+	if (!zero_fs_block)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
 		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
 {
@@ -236,17 +253,22 @@  static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 		loff_t pos, unsigned len)
 {
 	struct inode *inode = file_inode(dio->iocb->ki_filp);
-	struct page *page = ZERO_PAGE(0);
 	struct bio *bio;
 
+	/*
+	 * Max block size supported is 64k
+	 */
+	WARN_ON_ONCE(len > ZERO_FSB_SIZE);
+
 	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
 	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
 				  GFP_KERNEL);
+
 	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
 	bio->bi_private = dio;
 	bio->bi_end_io = iomap_dio_bio_end_io;
 
-	__bio_add_page(bio, page, len, 0);
+	__bio_add_page(bio, zero_fs_block, len, 0);
 	iomap_dio_submit_bio(iter, dio, bio, pos);
 }