Message ID | d47f7d1c80b0eabfee89a0fc9ef75bbe3d1eced7.1689885610.git.zhuyifei@google.com (mailing list archive) |
---|---
State | Changes Requested |
Delegated to: | BPF |
Series | bpf/memalloc: Allow non-atomic alloc_bulk |
On 7/21/2023 4:44 AM, YiFei Zhu wrote:
> Sometimes during prefill all percpu chunks are full and atomic
> __alloc_percpu_gfp would not allocate new chunks. This will cause
> -ENOMEM immediately upon next unit_alloc.
>
> Prefill phase does not actually run in atomic context, so we can
> use this fact to allocate non-atomically with GFP_KERNEL instead
> of GFP_NOWAIT. This avoids the immediate -ENOMEM. Unfortunately
> unit_alloc runs in atomic context, even from map item allocation in
> syscalls, due to rcu_read_lock, so we can't do non-atomic
> workarounds in unit_alloc.
>
> Fixes: 4ab67149f3c6 ("bpf: Add percpu allocation support to bpf_mem_alloc.")
> Signed-off-by: YiFei Zhu <zhuyifei@google.com>

Makes sense to me, so

Acked-by: Hou Tao <houtao1@huawei.com>

But I don't know whether or not it is suitable for the bpf tree.
On Thu, Jul 20, 2023 at 6:45 PM Hou Tao <houtao@huaweicloud.com> wrote:
> On 7/21/2023 4:44 AM, YiFei Zhu wrote:
> > Sometimes during prefill all percpu chunks are full and atomic
> > __alloc_percpu_gfp would not allocate new chunks. This will cause
> > -ENOMEM immediately upon next unit_alloc.
> >
> > Prefill phase does not actually run in atomic context, so we can
> > use this fact to allocate non-atomically with GFP_KERNEL instead
> > of GFP_NOWAIT. This avoids the immediate -ENOMEM. Unfortunately
> > unit_alloc runs in atomic context, even from map item allocation in
> > syscalls, due to rcu_read_lock, so we can't do non-atomic
> > workarounds in unit_alloc.
> >
> > Fixes: 4ab67149f3c6 ("bpf: Add percpu allocation support to bpf_mem_alloc.")
> > Signed-off-by: YiFei Zhu <zhuyifei@google.com>
>
> Makes sense to me, so
>
> Acked-by: Hou Tao <houtao1@huawei.com>
>
> But I don't know whether or not it is suitable for the bpf tree.

I don't mind either way :) If changing to bpf-next requires a resend I
can do that too.

YiFei Zhu
Hi,

On 7/21/2023 10:31 AM, YiFei Zhu wrote:
> On Thu, Jul 20, 2023 at 6:45 PM Hou Tao <houtao@huaweicloud.com> wrote:
>> On 7/21/2023 4:44 AM, YiFei Zhu wrote:
>>> Sometimes during prefill all percpu chunks are full and atomic
>>> __alloc_percpu_gfp would not allocate new chunks. This will cause
>>> -ENOMEM immediately upon next unit_alloc.
>>>
>>> Prefill phase does not actually run in atomic context, so we can
>>> use this fact to allocate non-atomically with GFP_KERNEL instead
>>> of GFP_NOWAIT. This avoids the immediate -ENOMEM. Unfortunately
>>> unit_alloc runs in atomic context, even from map item allocation in
>>> syscalls, due to rcu_read_lock, so we can't do non-atomic
>>> workarounds in unit_alloc.
>>>
>>> Fixes: 4ab67149f3c6 ("bpf: Add percpu allocation support to bpf_mem_alloc.")
>>> Signed-off-by: YiFei Zhu <zhuyifei@google.com>
>> Makes sense to me, so
>>
>> Acked-by: Hou Tao <houtao1@huawei.com>
>>
>> But I don't know whether or not it is suitable for the bpf tree.
> I don't mind either way :) If changing to bpf-next requires a resend I
> can do that too.

Please resend and rebase the patch against the bpf-next tree.
On Wed, Jul 26, 2023 at 4:38 AM Hou Tao <houtao@huaweicloud.com> wrote:
>
> Hi,
>
> On 7/21/2023 10:31 AM, YiFei Zhu wrote:
> > On Thu, Jul 20, 2023 at 6:45 PM Hou Tao <houtao@huaweicloud.com> wrote:
> >> On 7/21/2023 4:44 AM, YiFei Zhu wrote:
> >>> Sometimes during prefill all percpu chunks are full and atomic
> >>> __alloc_percpu_gfp would not allocate new chunks. This will cause
> >>> -ENOMEM immediately upon next unit_alloc.
> >>>
> >>> Prefill phase does not actually run in atomic context, so we can
> >>> use this fact to allocate non-atomically with GFP_KERNEL instead
> >>> of GFP_NOWAIT. This avoids the immediate -ENOMEM. Unfortunately
> >>> unit_alloc runs in atomic context, even from map item allocation in
> >>> syscalls, due to rcu_read_lock, so we can't do non-atomic
> >>> workarounds in unit_alloc.
> >>>
> >>> Fixes: 4ab67149f3c6 ("bpf: Add percpu allocation support to bpf_mem_alloc.")
> >>> Signed-off-by: YiFei Zhu <zhuyifei@google.com>
> >> Makes sense to me, so
> >>
> >> Acked-by: Hou Tao <houtao1@huawei.com>
> >>
> >> But I don't know whether or not it is suitable for the bpf tree.
> > I don't mind either way :) If changing to bpf-next requires a resend I
> > can do that too.
>
> Please resend and rebase the patch against the bpf-next tree.
>

Will do. Should I drop the Fixes tag then?

YiFei Zhu
Hi,

On 7/27/2023 2:44 AM, YiFei Zhu wrote:
> On Wed, Jul 26, 2023 at 4:38 AM Hou Tao <houtao@huaweicloud.com> wrote:
>> Hi,
>>
>> On 7/21/2023 10:31 AM, YiFei Zhu wrote:
>>> On Thu, Jul 20, 2023 at 6:45 PM Hou Tao <houtao@huaweicloud.com> wrote:
>>>> On 7/21/2023 4:44 AM, YiFei Zhu wrote:
>>>>> Sometimes during prefill all percpu chunks are full and atomic
>>>>> __alloc_percpu_gfp would not allocate new chunks. This will cause
>>>>> -ENOMEM immediately upon next unit_alloc.
>>>>>
>>>>> Prefill phase does not actually run in atomic context, so we can
>>>>> use this fact to allocate non-atomically with GFP_KERNEL instead
>>>>> of GFP_NOWAIT. This avoids the immediate -ENOMEM. Unfortunately
>>>>> unit_alloc runs in atomic context, even from map item allocation in
>>>>> syscalls, due to rcu_read_lock, so we can't do non-atomic
>>>>> workarounds in unit_alloc.
>>>>>
>>>>> Fixes: 4ab67149f3c6 ("bpf: Add percpu allocation support to bpf_mem_alloc.")
>>>>> Signed-off-by: YiFei Zhu <zhuyifei@google.com>
>>>> Makes sense to me, so
>>>>
>>>> Acked-by: Hou Tao <houtao1@huawei.com>
>>>>
>>>> But I don't know whether or not it is suitable for the bpf tree.
>>> I don't mind either way :) If changing to bpf-next requires a resend I
>>> can do that too.
>> Please resend and rebase the patch against the bpf-next tree.
>>
> Will do. Should I drop the Fixes tag then?

Before the introduction of the bpf memory allocator, the allocation
flag for per-cpu memory allocation in the hash map was already
GFP_NOWAIT. The bpf memory allocator didn't change that, so I think we
could drop the Fixes tag.

>
> YiFei Zhu
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 0668bcd7c926..016249672b43 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -154,13 +154,17 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 }
 
 /* Mostly runs from irq_work except __init phase. */
-static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
+static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
 	unsigned long flags;
+	gfp_t gfp;
 	void *obj;
 	int i;
 
+	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
+	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;
+
 	memcg = get_memcg(c);
 	old_memcg = set_active_memcg(memcg);
 	for (i = 0; i < cnt; i++) {
@@ -183,7 +187,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 		 * will allocate from the current numa node which is what we
 		 * want here.
 		 */
-		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
+		obj = __alloc(c, node, gfp);
 		if (!obj)
 			break;
 	}
@@ -321,7 +325,7 @@ static void bpf_mem_refill(struct irq_work *work)
 		/* irq_work runs on this cpu and kmalloc will allocate
 		 * from the current numa node which is what we want here.
 		 */
-		alloc_bulk(c, c->batch, NUMA_NO_NODE);
+		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
 	else if (cnt > c->high_watermark)
 		free_bulk(c);
 }
@@ -367,7 +371,7 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	 * prog won't be doing more than 4 map_update_elem from
 	 * irq disabled region
 	 */
-	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
+	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
 /* When size != 0 bpf_mem_cache for each cpu.
Sometimes during prefill all percpu chunks are full and atomic
__alloc_percpu_gfp would not allocate new chunks. This will cause
-ENOMEM immediately upon next unit_alloc.

Prefill phase does not actually run in atomic context, so we can
use this fact to allocate non-atomically with GFP_KERNEL instead
of GFP_NOWAIT. This avoids the immediate -ENOMEM. Unfortunately
unit_alloc runs in atomic context, even from map item allocation in
syscalls, due to rcu_read_lock, so we can't do non-atomic
workarounds in unit_alloc.

Fixes: 4ab67149f3c6 ("bpf: Add percpu allocation support to bpf_mem_alloc.")
Signed-off-by: YiFei Zhu <zhuyifei@google.com>
---
 kernel/bpf/memalloc.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
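For context, here is a minimal sketch of the allocator behaviour the commit
message relies on, using only the standard kernel percpu API; the helper name
try_percpu_alloc() is hypothetical and is not part of the patch:

/* Sketch, not from the patch: with GFP_NOWAIT the percpu allocator may
 * only carve the object out of already-populated chunks, so when those
 * are full it returns NULL and the caller sees -ENOMEM even though
 * memory is available; GFP_KERNEL may sleep and lets new chunks be
 * created, which is only safe outside atomic context.
 */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static void __percpu *try_percpu_alloc(size_t size, bool atomic)
{
	gfp_t gfp = (atomic ? GFP_NOWAIT : GFP_KERNEL) | __GFP_NOWARN;

	/* Callers in process context (e.g. prefill) can pass atomic = false;
	 * callers under rcu_read_lock or irq_work must pass atomic = true.
	 */
	return __alloc_percpu_gfp(size, __alignof__(u64), gfp);
}

The patch applies the same idea inside alloc_bulk(): prefill_mem_cache(),
which runs in process context, passes atomic = false so GFP_KERNEL is used,
while bpf_mem_refill(), which runs from irq_work, passes atomic = true and
keeps GFP_NOWAIT.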