Message ID | 20200910234707.5504-3-willy@infradead.org (mailing list archive) |
---|---|
State | Accepted |
Commit | 24addd848a45747bcda68418710c72fdc8e145e4 |
Series | THP iomap patches for 5.10 |
On 9/10/20 6:47 PM, Matthew Wilcox (Oracle) wrote:
> This helper is useful for both THPs and for supporting block size larger
> than page size.  Convert all users that I could find (we have a few
> different ways of writing this idiom, and I may have missed some).
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Dave Chinner <dchinner@redhat.com>
> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>

For jfs:

Acked-by: Dave Kleikamp <dave.kleikamp@oracle.com>

> ---
>  fs/iomap/buffered-io.c  |  8 ++++----
>  fs/jfs/jfs_metapage.c   |  2 +-
>  fs/xfs/xfs_aops.c       |  2 +-
>  include/linux/pagemap.h | 16 ++++++++++++++++
>  4 files changed, 22 insertions(+), 6 deletions(-)
> ...
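The commit message notes that the same quantity was open-coded in several different ways. A minimal side-by-side sketch of the idioms being unified, drawn from the hunks in this patch (the wrapper function and its name are illustrative only, not part of the patch):

```c
/*
 * Illustrative sketch only, not part of the patch.  It contrasts the
 * open-coded idioms replaced by this series with the new helper.
 * Kernel context is assumed: i_blocksize() comes from linux/fs.h and
 * i_blocks_per_page() from linux/pagemap.h after this patch.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static unsigned int example_blocks_per_page(struct inode *inode,
					    struct page *page)
{
	/* The idioms this series converts, all assuming a PAGE_SIZE page: */
	unsigned int by_division = PAGE_SIZE / i_blocksize(inode);
	unsigned int by_shift = PAGE_SIZE >> inode->i_blkbits;
	bool single_block = i_blocksize(inode) == PAGE_SIZE;

	(void)by_division;
	(void)by_shift;
	(void)single_block;

	/* The replacement also copes with THPs, and returns 0 when the
	 * block size is larger than the page. */
	return i_blocks_per_page(inode, page);
}
```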
From: Matthew Wilcox (Oracle)
> Sent: 11 September 2020 00:47
> This helper is useful for both THPs and for supporting block size larger
> than page size.  Convert all users that I could find (we have a few
> different ways of writing this idiom, and I may have missed some).
> ...
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index d81a9a86c5aa..330f86b825d7 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -46,7 +46,7 @@ iomap_page_create(struct inode *inode, struct page *page)
>  {
>  	struct iomap_page *iop = to_iomap_page(page);
>
> -	if (iop || i_blocksize(inode) == PAGE_SIZE)
> +	if (iop || i_blocks_per_page(inode, page) <= 1)
>  		return iop;
>
>  	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
> @@ -147,7 +147,7 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
>  	unsigned int i;
>
>  	spin_lock_irqsave(&iop->uptodate_lock, flags);
> -	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
> +	for (i = 0; i < i_blocks_per_page(inode, page); i++) {

You probably don't want to call the helper every time
around the loop.

	David
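For reference, the hoisting David is suggesting would look roughly like this against the hunk quoted above (a sketch only; `nr_blocks` is an illustrative local name and the loop body is elided):

```c
	/* Sketch of hoisting the loop-invariant bound out of the loop,
	 * as suggested above.  Names other than nr_blocks come from the
	 * quoted iomap_iop_set_range_uptodate() hunk. */
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	for (i = 0; i < nr_blocks; i++) {
		/* ... per-block uptodate handling as in the hunk above ... */
	}
```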
On Tue, Sep 15, 2020 at 03:40:52PM +0000, David Laight wrote:
> > @@ -147,7 +147,7 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
> >  	unsigned int i;
> >
> >  	spin_lock_irqsave(&iop->uptodate_lock, flags);
> > -	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
> > +	for (i = 0; i < i_blocks_per_page(inode, page); i++) {
>
> You probably don't want to call the helper every time
> around the loop.

This is a classic example of focusing on the details and missing the
larger picture.  We don't want the loop at all, and if you'd kept reading
the patch series, you'd see it disappear later.
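One way such a per-block loop can disappear entirely is to mark the whole range with the bitmap helpers. This is a sketch only, reusing names from the quoted hunk; the actual later patch in the series may differ in detail:

```c
	/* Sketch only: mark the whole [first, last] block range with one
	 * bitmap_set() call and check completeness with bitmap_full(),
	 * instead of walking every block. */
	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
```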
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index d81a9a86c5aa..330f86b825d7 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -46,7 +46,7 @@ iomap_page_create(struct inode *inode, struct page *page)
 {
 	struct iomap_page *iop = to_iomap_page(page);
 
-	if (iop || i_blocksize(inode) == PAGE_SIZE)
+	if (iop || i_blocks_per_page(inode, page) <= 1)
 		return iop;
 
 	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
@@ -147,7 +147,7 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 	unsigned int i;
 
 	spin_lock_irqsave(&iop->uptodate_lock, flags);
-	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+	for (i = 0; i < i_blocks_per_page(inode, page); i++) {
 		if (i >= first && i <= last)
 			set_bit(i, iop->uptodate);
 		else if (!test_bit(i, iop->uptodate))
@@ -1077,7 +1077,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
 		mapping_set_error(inode->i_mapping, -EIO);
 	}
 
-	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
 	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
 
 	if (!iop || atomic_dec_and_test(&iop->write_count))
@@ -1373,7 +1373,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	int error = 0, count = 0, i;
 	LIST_HEAD(submit_list);
 
-	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
 	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
 
 	/*
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index a2f5338a5ea1..176580f54af9 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -473,7 +473,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 	struct inode *inode = page->mapping->host;
 	struct bio *bio = NULL;
 	int block_offset;
-	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
+	int blocks_per_page = i_blocks_per_page(inode, page);
 	sector_t page_start;	/* address of page in fs blocks */
 	sector_t pblock;
 	int xlen;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index b35611882ff9..55d126d4e096 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -544,7 +544,7 @@ xfs_discard_page(
 			page, ip->i_ino, offset);
 
 	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-			PAGE_SIZE / i_blocksize(inode));
+			i_blocks_per_page(inode, page));
 	if (error && !XFS_FORCED_SHUTDOWN(mp))
 		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 out_invalidate:
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 50d2c39b47ab..f7f602040913 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -975,4 +975,20 @@ static inline int page_mkwrite_check_truncate(struct page *page,
 	return offset;
 }
 
+/**
+ * i_blocks_per_page - How many blocks fit in this page.
+ * @inode: The inode which contains the blocks.
+ * @page: The page (head page if the page is a THP).
+ *
+ * If the block size is larger than the size of this page, return zero.
+ *
+ * Context: The caller should hold a refcount on the page to prevent it
+ * from being split.
+ * Return: The number of filesystem blocks covered by this page.
+ */
+static inline
+unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
+{
+	return thp_size(page) >> inode->i_blkbits;
+}
 #endif /* _LINUX_PAGEMAP_H */
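As a quick sanity check of the `thp_size(page) >> inode->i_blkbits` arithmetic in the new helper, here is a standalone userspace sketch with illustrative block and page sizes (not kernel code; `thp_size()` is modelled as a plain byte count):

```c
/* Standalone userspace sketch of the helper's arithmetic, using
 * illustrative sizes only. */
#include <stdio.h>

static unsigned int blocks_per_page(unsigned long page_bytes,
				    unsigned int blkbits)
{
	return page_bytes >> blkbits;	/* mirrors thp_size(page) >> i_blkbits */
}

int main(void)
{
	printf("%u\n", blocks_per_page(4096UL, 10));	/* 4KiB page, 1KiB blocks  -> 4    */
	printf("%u\n", blocks_per_page(2UL << 20, 10));	/* 2MiB THP,  1KiB blocks  -> 2048 */
	printf("%u\n", blocks_per_page(4096UL, 16));	/* 4KiB page, 64KiB blocks -> 0    */
	return 0;
}
```

The last case corresponds to the documented behaviour of returning zero when the block size exceeds the page size.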