[RFCv5,3/5] iomap: Add iop's uptodate state handling functions

Message ID: 5372f29f986052f37b45c368a0eb8eed25eb8fdb.1683485700.git.ritesh.list@gmail.com (mailing list archive)
State: Mainlined, archived
Series: iomap: Add support for per-block dirty state to improve write performance

Commit Message

Ritesh Harjani (IBM) May 7, 2023, 7:27 p.m. UTC
Firstly, this patch renames the iop->uptodate bitmap to iop->state.
This is because later patches will add dirty state to the iop->state
bitmap, so the more general name makes sense.

Secondly, this patch adds helpers for handling the uptodate state bits
within iop->state.

No functional change in this patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
---
 fs/iomap/buffered-io.c | 78 +++++++++++++++++++++++++++++++-----------
 1 file changed, 58 insertions(+), 20 deletions(-)
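
For context on where the series is headed, here is a minimal sketch of
how a single state bitmap can carry both per-block states once dirty
tracking is added. The layout and the helper name below are
assumptions for illustration, not taken from this patch:

/*
 * Illustrative only: uptodate bits in [0, nr_blocks), dirty bits in
 * [nr_blocks, 2 * nr_blocks), with iop->state allocated at double size.
 */
static inline bool iop_test_block_dirty_sketch(struct iomap_page *iop,
		struct folio *folio, unsigned int block)
{
	unsigned int blks = i_blocks_per_folio(folio->mapping->host, folio);

	/* dirty bits are stored after all the uptodate bits */
	return test_bit(block + blks, iop->state);
}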

Comments

Brian Foster May 15, 2023, 3:10 p.m. UTC | #1
On Mon, May 08, 2023 at 12:57:58AM +0530, Ritesh Harjani (IBM) wrote:
> Firstly, this patch renames the iop->uptodate bitmap to iop->state.
> This is because later patches will add dirty state to the iop->state
> bitmap, so the more general name makes sense.
> 
> Secondly, this patch adds helpers for handling the uptodate state bits
> within iop->state.
> 
> No functional change in this patch.
> 
> Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
> ---
>  fs/iomap/buffered-io.c | 78 +++++++++++++++++++++++++++++++-----------
>  1 file changed, 58 insertions(+), 20 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index e732581dc2d4..5103b644e115 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
...
> @@ -43,6 +43,47 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
...
> +/*
> + * iop related helpers for checking uptodate/dirty state of per-block
> + * or range of blocks within a folio
> + */
> +static bool iop_test_full_uptodate(struct folio *folio)
> +{
> +	struct iomap_page *iop = to_iomap_page(folio);
> +	struct inode *inode = folio->mapping->host;
> +
> +	WARN_ON(!iop);

It looks like an oops or something is imminent here if iop is NULL. Why
the warn (here and in a couple other places)?

Brian

> +	return iop_bitmap_full(iop, i_blocks_per_folio(inode, folio));
> +}
> +
> +static bool iop_test_block_uptodate(struct folio *folio, unsigned int block)
> +{
> +	struct iomap_page *iop = to_iomap_page(folio);
> +
> +	WARN_ON(!iop);
> +	return iop_test_block(iop, block);
> +}
Ritesh Harjani (IBM) May 16, 2023, 10:14 a.m. UTC | #2
Brian Foster <bfoster@redhat.com> writes:

> On Mon, May 08, 2023 at 12:57:58AM +0530, Ritesh Harjani (IBM) wrote:
> ...
>> @@ -43,6 +43,47 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
> ...
>> +/*
>> + * iop related helpers for checking uptodate/dirty state of per-block
>> + * or range of blocks within a folio
>> + */
>> +static bool iop_test_full_uptodate(struct folio *folio)
>> +{
>> +	struct iomap_page *iop = to_iomap_page(folio);
>> +	struct inode *inode = folio->mapping->host;
>> +
>> +	WARN_ON(!iop);
>
> It looks like an oops or something is imminent here if iop is NULL. Why
> the warn (here and in a couple other places)?
>

I agree. I will remove it in the next revision.

-ritesh
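
A sketch of the helper as agreed above, with the WARN_ON simply
dropped: if iop were NULL, the dereference inside iop_test_block()
would oops anyway, so the warning adds no extra protection:

static bool iop_test_block_uptodate(struct folio *folio, unsigned int block)
{
	struct iomap_page *iop = to_iomap_page(folio);

	/* callers only reach this with an iop attached to the folio */
	return iop_test_block(iop, block);
}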

Christoph Hellwig May 18, 2023, 6:18 a.m. UTC | #3
> + * inline helpers for bitmap operations on iop->state
> + */
> +static inline void iop_set_range(struct iomap_page *iop, unsigned int start_blk,
> +				 unsigned int nr_blks)
> +{
> +	bitmap_set(iop->state, start_blk, nr_blks);
> +}
> +
> +static inline bool iop_test_block(struct iomap_page *iop, unsigned int block)
> +{
> +	return test_bit(block, iop->state);
> +}
> +
> +static inline bool iop_bitmap_full(struct iomap_page *iop,
> +				   unsigned int blks_per_folio)
> +{
> +	return bitmap_full(iop->state, blks_per_folio);
> +}

I don't really see much point in these helpers, any particular reason
for adding them?

> +/*
> + * iop related helpers for checking uptodate/dirty state of per-block
> + * or range of blocks within a folio
> + */

I'm also not sure this comment adds a whole lot of value.

The rest looks good modulo the WARN_ONs already mentioned by Brian.
Ritesh Harjani (IBM) May 19, 2023, 3:07 p.m. UTC | #4
Christoph Hellwig <hch@infradead.org> writes:

>> + * inline helpers for bitmap operations on iop->state
>> + */
>> +static inline void iop_set_range(struct iomap_page *iop, unsigned int start_blk,
>> +				 unsigned int nr_blks)
>> +{
>> +	bitmap_set(iop->state, start_blk, nr_blks);
>> +}
>> +
>> +static inline bool iop_test_block(struct iomap_page *iop, unsigned int block)
>> +{
>> +	return test_bit(block, iop->state);
>> +}
>> +
>> +static inline bool iop_bitmap_full(struct iomap_page *iop,
>> +				   unsigned int blks_per_folio)
>> +{
>> +	return bitmap_full(iop->state, blks_per_folio);
>> +}
>
> I don't really see much point in these helpers, any particular reason
> for adding them?
>

We ended up modifying the APIs in v5. The idea in v4 was to keep an
iop_set_range() function that is the same for both uptodate and dirty:
the caller passes start_blk depending on whether dirty or uptodate bits
need to be marked.
But with the API changes, we don't need these low-level helpers
anymore. So if no one has any objection, I can kill these one-liners.
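
A hypothetical sketch of that v4-style idea (the function names are
illustrative, and it assumes dirty bits are stored after the uptodate
bits in the shared bitmap):

static void iop_set_range_uptodate_bits(struct iomap_page *iop,
		unsigned int first_blk, unsigned int nr_blks)
{
	/* uptodate bits occupy the first blks_per_folio bits */
	iop_set_range(iop, first_blk, nr_blks);
}

static void iop_set_range_dirty_bits(struct iomap_page *iop,
		unsigned int blks_per_folio, unsigned int first_blk,
		unsigned int nr_blks)
{
	/* dirty bits follow, so the caller offsets start_blk */
	iop_set_range(iop, blks_per_folio + first_blk, nr_blks);
}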

>> +/*
>> + * iop related helpers for checking uptodate/dirty state of per-block
>> + * or range of blocks within a folio
>> + */
>
> I'm also not sure this comment adds a whole lot of value.
>
> The rest looks good modulo the WARN_ONs already mentioned by Brian.

Sure. Thanks for the review!

-ritesh
Christoph Hellwig May 23, 2023, 6 a.m. UTC | #5
On Fri, May 19, 2023 at 08:37:01PM +0530, Ritesh Harjani wrote:
> We ended up modifying the APIs in v5. The idea in v4 was to keep an
> iop_set_range() function that is the same for both uptodate and dirty:
> the caller passes start_blk depending on whether dirty or uptodate bits
> need to be marked.
> But with the API changes, we don't need these low-level helpers
> anymore. So if no one has any objection, I can kill these one-liners.

Yes, the actual uptodate/dirty helpers isolate the callers from the
bitmap API.  We don't really need another tiny wrapper here.
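
A sketch of what that suggestion looks like, with the one-line
wrappers folded into the uptodate helpers (and the WARN_ONs dropped,
per the earlier discussion):

static bool iop_test_full_uptodate(struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;

	/* call the bitmap API directly instead of iop_bitmap_full() */
	return bitmap_full(iop->state, i_blocks_per_folio(inode, folio));
}

static bool iop_test_block_uptodate(struct folio *folio, unsigned int block)
{
	struct iomap_page *iop = to_iomap_page(folio);

	/* call the bitmap API directly instead of iop_test_block() */
	return test_bit(block, iop->state);
}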

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e732581dc2d4..5103b644e115 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -24,14 +24,14 @@ 
 #define IOEND_BATCH_SIZE	4096
 
 /*
- * Structure allocated for each folio when block size < folio size
- * to track sub-folio uptodate status and I/O completions.
+ * Structure allocated for each folio to track per-block uptodate state
+ * and I/O completions.
  */
 struct iomap_page {
 	atomic_t		read_bytes_pending;
 	atomic_t		write_bytes_pending;
-	spinlock_t		uptodate_lock;
-	unsigned long		uptodate[];
+	spinlock_t		state_lock;
+	unsigned long		state[];
 };
 
 static inline struct iomap_page *to_iomap_page(struct folio *folio)
@@ -43,6 +43,47 @@  static inline struct iomap_page *to_iomap_page(struct folio *folio)
 
 static struct bio_set iomap_ioend_bioset;
 
+/*
+ * inline helpers for bitmap operations on iop->state
+ */
+static inline void iop_set_range(struct iomap_page *iop, unsigned int start_blk,
+				 unsigned int nr_blks)
+{
+	bitmap_set(iop->state, start_blk, nr_blks);
+}
+
+static inline bool iop_test_block(struct iomap_page *iop, unsigned int block)
+{
+	return test_bit(block, iop->state);
+}
+
+static inline bool iop_bitmap_full(struct iomap_page *iop,
+				   unsigned int blks_per_folio)
+{
+	return bitmap_full(iop->state, blks_per_folio);
+}
+
+/*
+ * iop related helpers for checking uptodate/dirty state of per-block
+ * or range of blocks within a folio
+ */
+static bool iop_test_full_uptodate(struct folio *folio)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+	struct inode *inode = folio->mapping->host;
+
+	WARN_ON(!iop);
+	return iop_bitmap_full(iop, i_blocks_per_folio(inode, folio));
+}
+
+static bool iop_test_block_uptodate(struct folio *folio, unsigned int block)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+
+	WARN_ON(!iop);
+	return iop_test_block(iop, block);
+}
+
 static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
 				   size_t off, size_t len)
 {
@@ -53,12 +94,11 @@  static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
 	unsigned long flags;
 
 	if (iop) {
-		spin_lock_irqsave(&iop->uptodate_lock, flags);
-		bitmap_set(iop->uptodate, first_blk, nr_blks);
-		if (bitmap_full(iop->uptodate,
-				i_blocks_per_folio(inode, folio)))
+		spin_lock_irqsave(&iop->state_lock, flags);
+		iop_set_range(iop, first_blk, nr_blks);
+		if (iop_test_full_uptodate(folio))
 			folio_mark_uptodate(folio);
-		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+		spin_unlock_irqrestore(&iop->state_lock, flags);
 	} else {
 		folio_mark_uptodate(folio);
 	}
@@ -79,12 +119,12 @@  static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 	else
 		gfp = GFP_NOFS | __GFP_NOFAIL;
 
-	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
+	iop = kzalloc(struct_size(iop, state, BITS_TO_LONGS(nr_blocks)),
 		      gfp);
 	if (iop) {
-		spin_lock_init(&iop->uptodate_lock);
+		spin_lock_init(&iop->state_lock);
 		if (folio_test_uptodate(folio))
-			bitmap_fill(iop->uptodate, nr_blocks);
+			iop_set_range(iop, 0, nr_blocks);
 		folio_attach_private(folio, iop);
 	}
 	return iop;
@@ -93,15 +133,13 @@  static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 static void iop_free(struct folio *folio)
 {
 	struct iomap_page *iop = to_iomap_page(folio);
-	struct inode *inode = folio->mapping->host;
-	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
 
 	if (!iop)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
-	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
-			folio_test_uptodate(folio));
+	WARN_ON_ONCE(iop_test_full_uptodate(folio) !=
+		     folio_test_uptodate(folio));
 	folio_detach_private(folio);
 	kfree(iop);
 }
@@ -132,7 +170,7 @@  static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* move forward for each leading block marked uptodate */
 		for (i = first; i <= last; i++) {
-			if (!test_bit(i, iop->uptodate))
+			if (!iop_test_block_uptodate(folio, i))
 				break;
 			*pos += block_size;
 			poff += block_size;
@@ -142,7 +180,7 @@  static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* truncate len if we find any trailing uptodate block(s) */
 		for ( ; i <= last; i++) {
-			if (test_bit(i, iop->uptodate)) {
+			if (iop_test_block_uptodate(folio, i)) {
 				plen -= (last - i + 1) * block_size;
 				last = i - 1;
 				break;
@@ -450,7 +488,7 @@  bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 	last = (from + count - 1) >> inode->i_blkbits;
 
 	for (i = first; i <= last; i++)
-		if (!test_bit(i, iop->uptodate))
+		if (!iop_test_block_uptodate(folio, i))
 			return false;
 	return true;
 }
@@ -1634,7 +1672,7 @@  iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * invalid, grab a new one.
 	 */
 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
-		if (iop && !test_bit(i, iop->uptodate))
+		if (iop && !iop_test_block_uptodate(folio, i))
 			continue;
 
 		error = wpc->ops->map_blocks(wpc, inode, pos);
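
For context on the irqsave locking in iop_set_range_uptodate() above:
the uptodate bits are also set from the read I/O completion path, which
can run in interrupt context. A simplified sketch of that call site
(adapted to this series' helper signature, not the verbatim kernel
code):

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		/* may run in irq context, hence state_lock irqsave */
		iop_set_range_uptodate(folio->mapping->host, folio,
				       offset, len);
	}

	/* unlock the folio once all pending read bytes have completed */
	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}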