| Message ID | 20230626140223.61209-1-guochunhai@vivo.com (mailing list archive) |
|---|---|
| State | New |
| Series | [f2fs-dev] f2fs: fix to avoid breaking fsync node chain |
Please ignore this patch. Sorry, I made a mistake. Commit 720037f939fa ("f2fs: don't overwrite node block by SSR") has already fixed this issue.

On 2023/6/26 22:02, 郭纯海 wrote:
> The fsync node chain can break when a node block is used, freed and reused
> in the chain. To avoid this, all blocks in the chain should be recorded and
> not reused before the next checkpoint. However, this approach may require
> too many resources. Instead, this patch records all related segments in a
> bitmap as a compromise solution.
> Since LFS allocation mode and GC operations do not reuse or modify obsolete
> blocks before the next checkpoint, we just need to ensure that the segments
> in the bitmap aren't reallocated in SSR allocation mode.
>
> Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
> ---
>  fs/f2fs/checkpoint.c | 11 +++++++++++
>  fs/f2fs/gc.c         | 14 ++++++++++++++
>  fs/f2fs/segment.c    | 10 ++++++++++
>  fs/f2fs/segment.h    |  3 +++
>  4 files changed, 38 insertions(+)
>
> diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> index 8fd3b7f9fb88..940f7a5568c0 100644
> --- a/fs/f2fs/checkpoint.c
> +++ b/fs/f2fs/checkpoint.c
> @@ -1447,6 +1447,15 @@ u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
>  	return get_sectors_written(sbi->sb->s_bdev);
>  }
>
> +void f2fs_reset_node_chain_segmap(struct f2fs_sb_info *sbi)
> +{
> +	struct sit_info *sit_i = SIT_I(sbi);
> +
> +	spin_lock(&sit_i->segmap_lock);
> +	memset(sit_i->node_chain_segmap, 0, f2fs_bitmap_size(MAIN_SEGS(sbi)));
> +	spin_unlock(&sit_i->segmap_lock);
> +}
> +
>  static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
>  {
>  	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
> @@ -1594,6 +1603,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
>
>  	f2fs_reset_fsync_node_info(sbi);
>
> +	f2fs_reset_node_chain_segmap(sbi);
> +
>  	clear_sbi_flag(sbi, SBI_IS_DIRTY);
>  	clear_sbi_flag(sbi, SBI_NEED_CP);
>  	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 01effd3fcb6c..e899c775b7fa 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -733,6 +733,15 @@ static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
>  	return -EAGAIN;
>  }
>
> +static bool f2fs_seg_in_node_chain(struct sit_info *sm, unsigned int segno)
> +{
> +	bool ret;
> +
> +	spin_lock(&sm->segmap_lock);
> +	ret = test_bit(segno, sm->node_chain_segmap);
> +	spin_unlock(&sm->segmap_lock);
> +	return ret;
> +}
>  /*
>   * This function is called from two paths.
>   * One is garbage collection and the other is SSR segment selection.
> @@ -871,6 +880,11 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
>  			}
>  		}
>
> +		/* if segno is used by node chain, find another one. */
> +		if ((p.alloc_mode == SSR || p.alloc_mode == AT_SSR)
> +				&& f2fs_seg_in_node_chain(sm, segno))
> +			goto next;
> +
>  		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
>  			goto next;
>
> diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> index 0457d620011f..0f37efb88ad3 100644
> --- a/fs/f2fs/segment.c
> +++ b/fs/f2fs/segment.c
> @@ -3475,6 +3475,10 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
>  	up_write(&sit_i->sentry_lock);
>
>  	if (page && IS_NODESEG(type)) {
> +		spin_lock(&sit_i->segmap_lock);
> +		set_bit(GET_SEGNO(sbi, *new_blkaddr), sit_i->node_chain_segmap);
> +		spin_unlock(&sit_i->segmap_lock);
> +
>  		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
>
>  		f2fs_inode_chksum_set(sbi, page);
> @@ -4442,6 +4446,12 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
>  		return -ENOMEM;
>  #endif
>
> +	sit_i->node_chain_segmap = f2fs_kvzalloc(sbi,
> +					main_bitmap_size, GFP_KERNEL);
> +	if (!sit_i->node_chain_segmap)
> +		return -ENOMEM;
> +
> +	spin_lock_init(&sit_i->segmap_lock);
>  	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
>  	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
>  	sit_i->written_valid_blocks = 0;
> diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
> index 2ca8fb5d0dc4..e0de075f959b 100644
> --- a/fs/f2fs/segment.h
> +++ b/fs/f2fs/segment.h
> @@ -264,6 +264,9 @@ struct sit_info {
>  	unsigned long long dirty_max_mtime;	/* rerange candidates in GC_AT */
>
>  	unsigned int last_victim[MAX_GC_POLICY];/* last victim segment # */
> +
> +	spinlock_t segmap_lock;			/* node chain segmap lock */
> +	unsigned long *node_chain_segmap;	/* segment bitmap used by node chain */
>  };
>
>  struct free_segmap_info {
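The retracted patch boils down to a three-step lifecycle on a per-segment bitmap: mark a segment when an fsync node block is written into it, skip marked segments during SSR/AT_SSR victim selection, and clear the whole map once a checkpoint completes. The standalone C sketch below illustrates only that lifecycle; it is not f2fs code, the names and the segment count are made up for the example, and a pthread mutex stands in for the kernel spinlock used in the patch.

```c
/*
 * Minimal userspace sketch of the bitmap lifecycle described in the
 * commit message above. Illustrative only -- not f2fs code.
 */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAIN_SEGS      1024	/* hypothetical number of main-area segments */
#define BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS   ((MAIN_SEGS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long node_chain_segmap[BITMAP_LONGS];
static pthread_mutex_t segmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Step 1: an fsync node block lands in @segno -> remember the segment. */
static void mark_node_chain_segment(unsigned int segno)
{
	pthread_mutex_lock(&segmap_lock);
	node_chain_segmap[segno / BITS_PER_LONG] |= 1UL << (segno % BITS_PER_LONG);
	pthread_mutex_unlock(&segmap_lock);
}

/* Step 2: SSR victim selection asks this before reusing @segno. */
static bool segment_in_node_chain(unsigned int segno)
{
	bool ret;

	pthread_mutex_lock(&segmap_lock);
	ret = node_chain_segmap[segno / BITS_PER_LONG] & (1UL << (segno % BITS_PER_LONG));
	pthread_mutex_unlock(&segmap_lock);
	return ret;
}

/* Step 3: a checkpoint makes the chain recoverable, so clear everything. */
static void checkpoint_reset_segmap(void)
{
	pthread_mutex_lock(&segmap_lock);
	memset(node_chain_segmap, 0, sizeof(node_chain_segmap));
	pthread_mutex_unlock(&segmap_lock);
}

int main(void)
{
	unsigned int segno = 42;

	mark_node_chain_segment(segno);		/* fsync writes a node block */
	printf("SSR may reuse segment %u: %s\n", segno,
	       segment_in_node_chain(segno) ? "no" : "yes");

	checkpoint_reset_segmap();		/* checkpoint completes */
	printf("SSR may reuse segment %u: %s\n", segno,
	       segment_in_node_chain(segno) ? "no" : "yes");
	return 0;
}
```

The trade-off is the one the commit message calls a compromise: tracking one bit per segment is far cheaper than recording every block address in the chain, at the cost of occasionally ruling out an SSR candidate segment that no longer holds any chain blocks.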