Message ID | 20221217021711.172247-4-eddyz87@gmail.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | BPF |
Headers | show |
Series | reduce BPF_ID_MAP_SIZE to fit only valid programs | expand |
On 12/16/22 6:17 PM, Eduard Zingerman wrote: > BPF limits stack usage by MAX_BPF_STACK bytes across all call frames, > however this is enforced by function check_max_stack_depth() which is > executed after do_check_{subprogs,main}(). > > This means that when check_ids() is executed the maximal stack depth is not > yet verified, thus in theory the number of stack spills might be > MAX_CALL_FRAMES * MAX_BPF_STACK / BPF_REG_SIZE. > > However, any program with stack usage deeper than > MAX_BPF_STACK / BPF_REG_SIZE would be rejected by verifier. > > Hence save some memory by reducing the BPF_ID_MAP_SIZE. > > This is a follow up for > https://lore.kernel.org/bpf/CAEf4BzYN1JmY9t03pnCHc4actob80wkBz2vk90ihJCBzi8CT9w@mail.gmail.com/ > > Signed-off-by: Eduard Zingerman <eddyz87@gmail.com> > Suggested-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Yonghong Song <yhs@fb.com>
On Fri, Dec 16, 2022 at 6:17 PM Eduard Zingerman <eddyz87@gmail.com> wrote: > > BPF limits stack usage by MAX_BPF_STACK bytes across all call frames, > however this is enforced by function check_max_stack_depth() which is > executed after do_check_{subprogs,main}(). > > This means that when check_ids() is executed the maximal stack depth is not > yet verified, thus in theory the number of stack spills might be > MAX_CALL_FRAMES * MAX_BPF_STACK / BPF_REG_SIZE. > > However, any program with stack usage deeper than > MAX_BPF_STACK / BPF_REG_SIZE would be rejected by verifier. > > Hence save some memory by reducing the BPF_ID_MAP_SIZE. > > This is a follow up for > https://lore.kernel.org/bpf/CAEf4BzYN1JmY9t03pnCHc4actob80wkBz2vk90ihJCBzi8CT9w@mail.gmail.com/ > > Signed-off-by: Eduard Zingerman <eddyz87@gmail.com> > Suggested-by: Andrii Nakryiko <andrii@kernel.org> > --- LGTM, thanks. Acked-by: Andrii Nakryiko <andrii@kernel.org> > include/linux/bpf_verifier.h | 4 ++-- > kernel/bpf/verifier.c | 6 ++++-- > 2 files changed, 6 insertions(+), 4 deletions(-) > > diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h > index 53d175cbaa02..da72e16f1dee 100644 > --- a/include/linux/bpf_verifier.h > +++ b/include/linux/bpf_verifier.h > @@ -274,8 +274,8 @@ struct bpf_id_pair { > }; > > #define MAX_CALL_FRAMES 8 > -/* Maximum number of register states that can exist at once */ > -#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES) > +/* Maximum number of register states that can exist at once in a valid program */ > +#define BPF_ID_MAP_SIZE (MAX_BPF_REG * MAX_CALL_FRAMES + MAX_BPF_STACK / BPF_REG_SIZE) > struct bpf_verifier_state { > /* call stack tracking */ > struct bpf_func_state *frame[MAX_CALL_FRAMES]; > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c > index a5255a0dcbb6..fb040516a946 100644 > --- a/kernel/bpf/verifier.c > +++ b/kernel/bpf/verifier.c > @@ -12951,8 +12951,10 @@ static bool check_ids(u32 
old_id, u32 cur_id, struct bpf_id_pair *idmap) > if (idmap[i].old == old_id) > return idmap[i].cur == cur_id; > } > - /* We ran out of idmap slots, which should be impossible */ > - WARN_ON_ONCE(1); > + /* Run out of slots in idmap, conservatively return false, cached > + * state will not be reused. The BPF_ID_MAP_SIZE is sufficiently > + * large to fit all valid programs. > + */ > return false; > } > > -- > 2.38.2 >
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 53d175cbaa02..da72e16f1dee 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -274,8 +274,8 @@ struct bpf_id_pair { }; #define MAX_CALL_FRAMES 8 -/* Maximum number of register states that can exist at once */ -#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES) +/* Maximum number of register states that can exist at once in a valid program */ +#define BPF_ID_MAP_SIZE (MAX_BPF_REG * MAX_CALL_FRAMES + MAX_BPF_STACK / BPF_REG_SIZE) struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a5255a0dcbb6..fb040516a946 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12951,8 +12951,10 @@ static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } - /* We ran out of idmap slots, which should be impossible */ - WARN_ON_ONCE(1); + /* Run out of slots in idmap, conservatively return false, cached + * state will not be reused. The BPF_ID_MAP_SIZE is sufficiently + * large to fit all valid programs. + */ return false; }
BPF limits stack usage by MAX_BPF_STACK bytes across all call frames, however this is enforced by function check_max_stack_depth() which is executed after do_check_{subprogs,main}(). This means that when check_ids() is executed the maximal stack depth is not yet verified, thus in theory the number of stack spills might be MAX_CALL_FRAMES * MAX_BPF_STACK / BPF_REG_SIZE. However, any program with stack usage deeper than MAX_BPF_STACK / BPF_REG_SIZE would be rejected by the verifier. Hence save some memory by reducing the BPF_ID_MAP_SIZE. This is a follow-up for https://lore.kernel.org/bpf/CAEf4BzYN1JmY9t03pnCHc4actob80wkBz2vk90ihJCBzi8CT9w@mail.gmail.com/ Signed-off-by: Eduard Zingerman <eddyz87@gmail.com> Suggested-by: Andrii Nakryiko <andrii@kernel.org> --- include/linux/bpf_verifier.h | 4 ++-- kernel/bpf/verifier.c | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-)