Message ID | 20241024132225.2271667-13-yukuai1@huaweicloud.com (mailing list archive) |
---|---|
State | New |
Series | fix CVE-2024-46701 |
On Thu, Oct 24, 2024 at 09:22:25PM +0800, Yu Kuai wrote:

> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 5328e08723d7..c57b6fc4db2e 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -2239,6 +2239,8 @@ static inline void mas_node_or_none(struct ma_state *mas,
>
>  /*
>   * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
> + *                      If @mas->index cannot be found within the containing
> + *                      node, we traverse to the last entry in the node.
>   * @wr_mas: The maple write state
>   *
>   * Uses mas_slot_locked() and does not need to worry about dead nodes.
> @@ -3655,7 +3657,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
>  	return true;
>  }
>
> -static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
> +static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
>  {
>  	struct ma_state *mas = wr_mas->mas;
>
> @@ -3664,11 +3666,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
>  		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
>  						  mas->offset);
>  		if (ma_is_leaf(wr_mas->type))
> -			return true;
> +			return;
>  		mas_wr_walk_traverse(wr_mas);
> -
>  	}
> -	return true;
>  }
>
>  /*
>   * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
> @@ -3899,8 +3899,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
>  	memset(&b_node, 0, sizeof(struct maple_big_node));
>  	/* Copy l_mas and store the value in b_node. */
>  	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
> -	/* Copy r_mas into b_node. */
> -	if (r_mas.offset <= r_mas.end)
> +	/* Copy r_mas into b_node if there is anything to copy. */
> +	if (r_mas.max > r_mas.last)
>  		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
>  			   &b_node, b_node.b_end + 1);
>  	else
> --
> 2.39.2
>

This is a good example of where you've gone horribly wrong: this relies on
31c532a8af57 ("maple_tree: add end of node tracking to the maple state"),
which is not in 6.6.

You reverted (!!) my backported patch for this, which _does not require
this_, only to pull in 31c532a8af57 in order to apply the upstream version
of my fix on top of it.

This is totally unnecessary and I can't see why _on earth_ you would need
31c532a8af57.

You need to correctly identify which patches need to be backported and
_fix merge conflicts_ accordingly, as I did with the patch that you
decided to revert.

In the kernel it is absolutely unacceptable to arbitrarily backport huge
numbers of patches you don't understand in order to avoid merge conflicts;
you may be breaking all kinds of things without realising it.

You have to find the _minimal_ change and _fix merge conflicts_.

Stable is not a playground; it's what millions (billions?) of kernels
rely upon.

In any case, I think Liam's reply suggests that we should be looking at
maybe one thing to backport? If we even need to?

Please be more cautious in future, and if you are unsure how to proceed,
cc the relevant maintainers (plus all authors of patches you intend to
backport/revert) on an RFC. Thanks.
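To make the "fix the conflict by hand" approach concrete: the upstream fix
only needs the one-line condition change in mas_wr_spanning_store(), with
the identifiers that differ on 6.6 adjusted by hand. The sketch below is
illustrative only, not a tested backport; it assumes that before
31c532a8af57 the node end was tracked as node_end in struct ma_wr_state
(that commit moved it into struct ma_state as "end"), so r_mas.end would
read as r_wr_mas.node_end on 6.6.

--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	/* Copy l_mas and store the value in b_node. */
 	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
-	/* Copy r_mas into b_node. */
-	if (r_mas.offset <= r_wr_mas.node_end)
+	/* Copy r_mas into b_node if there is anything to copy. */
+	if (r_mas.max > r_mas.last)
 		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
 			   &b_node, b_node.b_end + 1);
 	else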
Hi,

On 2024/11/06 23:02, Lorenzo Stoakes wrote:
> On Thu, Oct 24, 2024 at 09:22:25PM +0800, Yu Kuai wrote:
>
>> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
>> index 5328e08723d7..c57b6fc4db2e 100644

[...]

>> --
>> 2.39.2
>
> This is a good example of where you've gone horribly wrong: this relies on
> 31c532a8af57 ("maple_tree: add end of node tracking to the maple state"),
> which is not in 6.6.
>
> You reverted (!!) my backported patch for this, which _does not require
> this_, only to pull in 31c532a8af57 in order to apply the upstream version
> of my fix on top of it.
>
> This is totally unnecessary and I can't see why _on earth_ you would need
> 31c532a8af57.
>
> You need to correctly identify which patches need to be backported and
> _fix merge conflicts_ accordingly, as I did with the patch that you
> decided to revert.
>
> In the kernel it is absolutely unacceptable to arbitrarily backport huge
> numbers of patches you don't understand in order to avoid merge conflicts;
> you may be breaking all kinds of things without realising it.
>
> You have to find the _minimal_ change and _fix merge conflicts_.

Thanks for the suggestions. I do understand; however, I'll just give up on
this, because I'm not confident I can fix conflicts for the maple tree.
Other folks will have to do this if they care about this CVE for v6.6.

> Stable is not a playground; it's what millions (billions?) of kernels
> rely upon.
>
> In any case, I think Liam's reply suggests that we should be looking at
> maybe one thing to backport? If we even need to?

Keeping xarray for patch 27 is wrong, I think. The allocating xarray API is
32-bit, and if the offset overflows, readdir will find nothing, which is
more severe than the original CVE.

> Please be more cautious in future, and if you are unsure how to proceed,
> cc the relevant maintainers (plus all authors of patches you intend to
> backport/revert) on an RFC. Thanks.

Of course.

Thanks,
Kuai
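A note on the 32-bit concern above: the allocating xarray API hands out u32
IDs, while the maple tree's cyclic allocator works on unsigned long, so
offsets stored as loff_t cannot wrap at 2^32 on 64-bit kernels. The sketch
below is a rough illustration of that difference only; the helper names and
the lower bound of 2 are hypothetical, and this is not the actual
simple_offset code from patch 27.

#include <linux/maple_tree.h>
#include <linux/xarray.h>

/* Hypothetical: allocate a directory offset from a 32-bit ID space. */
static int offset_alloc_xa(struct xarray *xa, void *entry, u32 *next)
{
	u32 offset;

	/*
	 * xa_alloc_cyclic() caps IDs at U32_MAX; a long-lived directory
	 * can wrap, and wrapped offsets no longer match what was handed
	 * to userspace, so lookups during readdir miss.
	 */
	return xa_alloc_cyclic(xa, &offset, entry, XA_LIMIT(2, U32_MAX),
			       next, GFP_KERNEL);
}

/* Hypothetical: the same allocation over the full unsigned long range. */
static int offset_alloc_mt(struct maple_tree *mt, void *entry,
			   unsigned long *next)
{
	unsigned long offset;

	/*
	 * mtree_alloc_cyclic() takes unsigned long bounds, matching the
	 * width of loff_t on 64-bit kernels, so there is no 2^32 wrap.
	 */
	return mtree_alloc_cyclic(mt, &offset, entry, 2, ULONG_MAX,
				  next, GFP_KERNEL);
}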
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 5328e08723d7..c57b6fc4db2e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2239,6 +2239,8 @@ static inline void mas_node_or_none(struct ma_state *mas,
 
 /*
  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ *                      If @mas->index cannot be found within the containing
+ *                      node, we traverse to the last entry in the node.
  * @wr_mas: The maple write state
  *
  * Uses mas_slot_locked() and does not need to worry about dead nodes.
@@ -3655,7 +3657,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
 	return true;
 }
 
-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 
@@ -3664,11 +3666,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
 						  mas->offset);
 		if (ma_is_leaf(wr_mas->type))
-			return true;
+			return;
 		mas_wr_walk_traverse(wr_mas);
-
 	}
-	return true;
 }
 
 /*
  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
@@ -3899,8 +3899,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	/* Copy l_mas and store the value in b_node. */
 	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
-	/* Copy r_mas into b_node. */
-	if (r_mas.offset <= r_mas.end)
+	/* Copy r_mas into b_node if there is anything to copy. */
+	if (r_mas.max > r_mas.last)
 		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
 			   &b_node, b_node.b_end + 1);
 	else
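As a reading aid for the last hunk (not part of the patch): a spanning
store writes the range [mas->index, mas->last], and r_mas is walked to the
node containing the last index. r_mas.max is the highest index that node
covers, so there is genuinely something to copy to the right of the store
only when the node extends past the last index. Expressed as a hypothetical
helper, under the assumption that max and last carry those meanings in
struct ma_state:

#include <linux/maple_tree.h>

/*
 * Hypothetical reading aid, not kernel code: true only if the right-hand
 * node covers indices beyond the end of the spanning store, i.e. there is
 * leftover data that must be copied into the big node. The old test
 * (r_mas.offset <= r_mas.end) could still hold in that situation, which
 * is what the condition change addresses.
 */
static inline bool right_node_has_remainder(const struct ma_state *r_mas)
{
	return r_mas->max > r_mas->last;
}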