Message ID | b1d1bab106ddc4456224c0bf1c1bfcfaea4844b7.1638886948.git.johannes.thumshirn@wdc.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | btrfs: first batch of zoned cleanups | expand |
On Tue, Dec 07, 2021 at 06:28:34AM -0800, Johannes Thumshirn wrote: > Encapsulate the inode lock needed for serializing the data relocation > writes on a zoned filesystem into a helper. > > This streamlines the code reading flow and hides special casing for > zoned filesystems. > > Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> > Reviewed-by: Josef Bacik <josef@toxicpanda.com> > --- > fs/btrfs/extent_io.c | 8 ++------ > fs/btrfs/zoned.h | 17 +++++++++++++++++ > 2 files changed, 19 insertions(+), 6 deletions(-) > > diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c > index 1a67f4b3986b..cc27e6e6d6ce 100644 > --- a/fs/btrfs/extent_io.c > +++ b/fs/btrfs/extent_io.c > @@ -5184,8 +5184,6 @@ int extent_writepages(struct address_space *mapping, > struct writeback_control *wbc) > { > struct inode *inode = mapping->host; > - const bool data_reloc = btrfs_is_data_reloc_root(BTRFS_I(inode)->root); > - const bool zoned = btrfs_is_zoned(BTRFS_I(inode)->root->fs_info); > int ret = 0; > struct extent_page_data epd = { > .bio_ctrl = { 0 }, > @@ -5197,11 +5195,9 @@ int extent_writepages(struct address_space *mapping, > * Allow only a single thread to do the reloc work in zoned mode to > * protect the write pointer updates. 
> */ > - if (data_reloc && zoned) > - btrfs_inode_lock(inode, 0); > + btrfs_zoned_data_reloc_lock(inode); > ret = extent_write_cache_pages(mapping, wbc, &epd); > - if (data_reloc && zoned) > - btrfs_inode_unlock(inode, 0); > + btrfs_zoned_data_reloc_unlock(inode); > ASSERT(ret <= 0); > if (ret < 0) { > end_write_bio(&epd, ret); > diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h > index 4344f4818389..e3eaf03a3422 100644 > --- a/fs/btrfs/zoned.h > +++ b/fs/btrfs/zoned.h > @@ -8,6 +8,7 @@ > #include "volumes.h" > #include "disk-io.h" > #include "block-group.h" > +#include "btrfs_inode.h" > > /* > * Block groups with more than this value (percents) of unusable space will be > @@ -354,4 +355,20 @@ static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg) > spin_unlock(&fs_info->treelog_bg_lock); > } > > +static inline void btrfs_zoned_data_reloc_lock(struct inode *inode) All internal API should use struct btrfs_inode, applied with the following diff: --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5195,9 +5195,9 @@ int extent_writepages(struct address_space *mapping, * Allow only a single thread to do the reloc work in zoned mode to * protect the write pointer updates. 
*/ - btrfs_zoned_data_reloc_lock(inode); + btrfs_zoned_data_reloc_lock(BTRFS_I(inode)); ret = extent_write_cache_pages(mapping, wbc, &epd); - btrfs_zoned_data_reloc_unlock(inode); + btrfs_zoned_data_reloc_unlock(BTRFS_I(inode)); ASSERT(ret <= 0); if (ret < 0) { end_write_bio(&epd, ret); diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h index e3eaf03a3422..a7b4cd6dd9f4 100644 --- a/fs/btrfs/zoned.h +++ b/fs/btrfs/zoned.h @@ -355,20 +355,20 @@ static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg) spin_unlock(&fs_info->treelog_bg_lock); } -static inline void btrfs_zoned_data_reloc_lock(struct inode *inode) +static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info)) - btrfs_inode_lock(inode, 0); + btrfs_inode_lock(&inode->vfs_inode, 0); } -static inline void btrfs_zoned_data_reloc_unlock(struct inode *inode) +static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info)) - btrfs_inode_unlock(inode, 0); + btrfs_inode_unlock(&inode->vfs_inode, 0); } #endif
On 07/12/2021 20:08, David Sterba wrote: > All internal API should use struct btrfs_inode, applied with the > following diff: Ah ok, good to know. I wanted to have a bit less pointer chasing with using 'struct inode'. Anyways I don't mind. Thanks
On Wed, Dec 08, 2021 at 09:06:14AM +0000, Johannes Thumshirn wrote: > On 07/12/2021 20:08, David Sterba wrote: > > All internal API should use struct btrfs_inode, applied with the > > following diff: > > Ah ok, good to know. I wanted to have a bit less pointer casing > with using 'struct inode'. Anyways I don't mind. Now it's a mix of both and needs more work to unify but at least new code should be done the right way. The pointer chasing will go eventually and I don't think it has a measurable impact.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1a67f4b3986b..cc27e6e6d6ce 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5184,8 +5184,6 @@ int extent_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; - const bool data_reloc = btrfs_is_data_reloc_root(BTRFS_I(inode)->root); - const bool zoned = btrfs_is_zoned(BTRFS_I(inode)->root->fs_info); int ret = 0; struct extent_page_data epd = { .bio_ctrl = { 0 }, @@ -5197,11 +5195,9 @@ int extent_writepages(struct address_space *mapping, * Allow only a single thread to do the reloc work in zoned mode to * protect the write pointer updates. */ - if (data_reloc && zoned) - btrfs_inode_lock(inode, 0); + btrfs_zoned_data_reloc_lock(inode); ret = extent_write_cache_pages(mapping, wbc, &epd); - if (data_reloc && zoned) - btrfs_inode_unlock(inode, 0); + btrfs_zoned_data_reloc_unlock(inode); ASSERT(ret <= 0); if (ret < 0) { end_write_bio(&epd, ret); diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h index 4344f4818389..e3eaf03a3422 100644 --- a/fs/btrfs/zoned.h +++ b/fs/btrfs/zoned.h @@ -8,6 +8,7 @@ #include "volumes.h" #include "disk-io.h" #include "block-group.h" +#include "btrfs_inode.h" /* * Block groups with more than this value (percents) of unusable space will be @@ -354,4 +355,20 @@ static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg) spin_unlock(&fs_info->treelog_bg_lock); } +static inline void btrfs_zoned_data_reloc_lock(struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + + if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info)) + btrfs_inode_lock(inode, 0); +} + +static inline void btrfs_zoned_data_reloc_unlock(struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + + if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info)) + btrfs_inode_unlock(inode, 0); +} + #endif