| Message ID | 20250221081109.734170-3-zhengqixing@huaweicloud.com |
|---|---|
| State | New |
| Series | badblocks: bugfix and cleanup for badblocks |
On 2025/02/21 16:10, Zheng Qixing wrote:
> From: Li Nan <linan122@huawei.com>
>
> Factor out try_adjacent_combine(); it will be used in a later patch.
>
> Signed-off-by: Li Nan <linan122@huawei.com>
> ---
>  block/badblocks.c | 40 ++++++++++++++++++++++++++--------------
>  1 file changed, 26 insertions(+), 14 deletions(-)
>

LGTM
Reviewed-by: Yu Kuai <yukuai3@huawei.com>

> diff --git a/block/badblocks.c b/block/badblocks.c
> index bcee057efc47..f069c93e986d 100644
> --- a/block/badblocks.c
> +++ b/block/badblocks.c
> @@ -855,6 +855,31 @@ static void badblocks_update_acked(struct badblocks *bb)
>  		bb->unacked_exist = 0;
>  }
>
> +/*
> + * Merge bad table entries 'prev' and 'prev + 1' if they are adjacent,
> + * fit in BB_MAX_LEN, and are equally acknowledged. Return 'true' on merge.
> + */
> +static bool try_adjacent_combine(struct badblocks *bb, int prev)
> +{
> +	u64 *p = bb->page;
> +
> +	if (prev >= 0 && (prev + 1) < bb->count &&
> +	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
> +	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
> +	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
> +		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
> +				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
> +				  BB_ACK(p[prev]));
> +
> +		if ((prev + 2) < bb->count)
> +			memmove(p + prev + 1, p + prev + 2,
> +				(bb->count - (prev + 2)) * 8);
> +		bb->count--;
> +		return true;
> +	}
> +	return false;
> +}
> +
>  /* Do exact work to set bad block range into the bad block table */
>  static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
>  			  int acknowledged)
> @@ -1022,20 +1047,7 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
>  	 * merged. (prev < 0) condition is not handled here,
>  	 * because it's already complicated enough.
>  	 */
> -	if (prev >= 0 &&
> -	    (prev + 1) < bb->count &&
> -	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
> -	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
> -	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
> -		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
> -				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
> -				  BB_ACK(p[prev]));
> -
> -		if ((prev + 2) < bb->count)
> -			memmove(p + prev + 1, p + prev + 2,
> -				(bb->count - (prev + 2)) * 8);
> -		bb->count--;
> -	}
> +	try_adjacent_combine(bb, prev);
>
>  	if (space_desired && !badblocks_full(bb)) {
>  		s = orig_start;
>
On Fri, Feb 21, 2025 at 04:10:59PM +0800, Zheng Qixing wrote:
> From: Li Nan <linan122@huawei.com>
>
> Factor out try_adjacent_combine(); it will be used in a later patch.
>

Which patch is try_adjacent_combine() used in? I don't see that at a quick glance.

Thanks.

Coly Li

> Signed-off-by: Li Nan <linan122@huawei.com>
> ---
>  block/badblocks.c | 40 ++++++++++++++++++++++++++--------------
>  1 file changed, 26 insertions(+), 14 deletions(-)
>
> diff --git a/block/badblocks.c b/block/badblocks.c
> index bcee057efc47..f069c93e986d 100644
> --- a/block/badblocks.c
> +++ b/block/badblocks.c
> @@ -855,6 +855,31 @@ static void badblocks_update_acked(struct badblocks *bb)
>  		bb->unacked_exist = 0;
>  }
>
> +/*
> + * Merge bad table entries 'prev' and 'prev + 1' if they are adjacent,
> + * fit in BB_MAX_LEN, and are equally acknowledged. Return 'true' on merge.
> + */
> +static bool try_adjacent_combine(struct badblocks *bb, int prev)
> +{
> +	u64 *p = bb->page;
> +
> +	if (prev >= 0 && (prev + 1) < bb->count &&
> +	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
> +	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
> +	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
> +		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
> +				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
> +				  BB_ACK(p[prev]));
> +
> +		if ((prev + 2) < bb->count)
> +			memmove(p + prev + 1, p + prev + 2,
> +				(bb->count - (prev + 2)) * 8);
> +		bb->count--;
> +		return true;
> +	}
> +	return false;
> +}
> +
>  /* Do exact work to set bad block range into the bad block table */
>  static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
>  			  int acknowledged)
> @@ -1022,20 +1047,7 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
>  	 * merged. (prev < 0) condition is not handled here,
>  	 * because it's already complicated enough.
>  	 */
> -	if (prev >= 0 &&
> -	    (prev + 1) < bb->count &&
> -	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
> -	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
> -	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
> -		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
> -				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
> -				  BB_ACK(p[prev]));
> -
> -		if ((prev + 2) < bb->count)
> -			memmove(p + prev + 1, p + prev + 2,
> -				(bb->count - (prev + 2)) * 8);
> -		bb->count--;
> -	}
> +	try_adjacent_combine(bb, prev);
>
>  	if (space_desired && !badblocks_full(bb)) {
>  		s = orig_start;
> --
> 2.39.2
>
> On Feb 21, 2025, at 18:04, Coly Li <i@coly.li> wrote:
>
> On Fri, Feb 21, 2025 at 04:10:59PM +0800, Zheng Qixing wrote:
>> From: Li Nan <linan122@huawei.com>
>>
>> Factor out try_adjacent_combine(); it will be used in a later patch.
>>
>
> Which patch is try_adjacent_combine() used in? I don't see that at a quick glance.

OK, I see it is in ack_all_badblocks(). Ignore the above question.

Coly Li
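For context, ack_all_badblocks() walks the bad table and marks every entry acknowledged, at which point neighbouring ranges that previously differed only in their ack state become mergeable. Below is a minimal sketch of how the new helper could be driven across the whole table; the wrapper name combine_acked_ranges() and its loop are illustrative assumptions, not code from this series:

/*
 * Illustrative only: a hypothetical wrapper showing how
 * try_adjacent_combine() could sweep the whole bad table once every
 * entry has been acknowledged. Not the actual follow-up patch.
 */
static void combine_acked_ranges(struct badblocks *bb)
{
	int i = 0;

	while (i + 1 < bb->count) {
		/*
		 * On a successful merge the table shrinks by one and the
		 * former entry at i + 2 slides into slot i + 1, so stay
		 * on 'i' to test the new neighbour; otherwise advance.
		 */
		if (!try_adjacent_combine(bb, i))
			i++;
	}
}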
diff --git a/block/badblocks.c b/block/badblocks.c
index bcee057efc47..f069c93e986d 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -855,6 +855,31 @@ static void badblocks_update_acked(struct badblocks *bb)
 		bb->unacked_exist = 0;
 }
 
+/*
+ * Merge bad table entries 'prev' and 'prev + 1' if they are adjacent,
+ * fit in BB_MAX_LEN, and are equally acknowledged. Return 'true' on merge.
+ */
+static bool try_adjacent_combine(struct badblocks *bb, int prev)
+{
+	u64 *p = bb->page;
+
+	if (prev >= 0 && (prev + 1) < bb->count &&
+	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
+	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
+	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
+		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
+				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
+				  BB_ACK(p[prev]));
+
+		if ((prev + 2) < bb->count)
+			memmove(p + prev + 1, p + prev + 2,
+				(bb->count - (prev + 2)) * 8);
+		bb->count--;
+		return true;
+	}
+	return false;
+}
+
 /* Do exact work to set bad block range into the bad block table */
 static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
 			  int acknowledged)
@@ -1022,20 +1047,7 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
 	 * merged. (prev < 0) condition is not handled here,
 	 * because it's already complicated enough.
 	 */
-	if (prev >= 0 &&
-	    (prev + 1) < bb->count &&
-	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
-	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
-	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
-		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
-				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
-				  BB_ACK(p[prev]));
-
-		if ((prev + 2) < bb->count)
-			memmove(p + prev + 1, p + prev + 2,
-				(bb->count - (prev + 2)) * 8);
-		bb->count--;
-	}
+	try_adjacent_combine(bb, prev);
 
 	if (space_desired && !badblocks_full(bb)) {
 		s = orig_start;
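To see the merge rule in isolation, the condition can be exercised in a standalone userspace harness. The BB_* macros below mirror include/linux/badblocks.h, where each u64 table entry packs a sector offset (shifted left by 9), a 9-bit "length - 1" field, and an acknowledged bit in bit 63; the main() around them is illustrative only:

/* Userspace demo of the merge rule in try_adjacent_combine().
 * The BB_* macros mirror include/linux/badblocks.h; the harness
 * around them is illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t u64;

#define BB_LEN_MASK	0x00000000000001FFULL	/* bits 0..8: length - 1 */
#define BB_OFFSET_MASK	0x7FFFFFFFFFFFFE00ULL	/* bits 9..62: offset */
#define BB_ACK_MASK	0x8000000000000000ULL	/* bit 63: acknowledged */
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_END(x)	(BB_OFFSET(x) + BB_LEN(x))
#define BB_MAKE(a, l, ack) (((u64)(a) << 9) | ((l) - 1) | ((u64)(!!(ack)) << 63))

int main(void)
{
	/* Two acked ranges: [100, 130) and [130, 150) -- end meets start. */
	u64 p[2] = { BB_MAKE(100, 30, 1), BB_MAKE(130, 20, 1) };

	/* The exact condition used by try_adjacent_combine(). */
	bool mergeable = BB_END(p[0]) == BB_OFFSET(p[1]) &&
			 BB_LEN(p[0]) + BB_LEN(p[1]) <= BB_MAX_LEN &&
			 BB_ACK(p[0]) == BB_ACK(p[1]);

	if (mergeable)
		p[0] = BB_MAKE(BB_OFFSET(p[0]),
			       BB_LEN(p[0]) + BB_LEN(p[1]), BB_ACK(p[0]));

	printf("merged: offset=%llu len=%llu ack=%d\n",
	       (unsigned long long)BB_OFFSET(p[0]),
	       (unsigned long long)BB_LEN(p[0]), BB_ACK(p[0]));
	return 0;
}

Built with a plain cc, this prints "merged: offset=100 len=50 ack=1": the two acknowledged ranges collapse into one entry, which is exactly what try_adjacent_combine() leaves in p[prev]. The same packing also explains the literal 8 in the memmove() above: each bad table entry is a single u64, i.e. 8 bytes.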