
[bpf-next,1/2] mm, bpf: Introduce __GFP_TRYLOCK for opportunistic page allocation

Message ID: 20241116014854.55141-1-alexei.starovoitov@gmail.com (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: [bpf-next,1/2] mm, bpf: Introduce __GFP_TRYLOCK for opportunistic page allocation

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 549 this patch: 549
netdev/build_tools success Errors and warnings before: 0 (+0) this patch: 0 (+0)
netdev/cc_maintainers warning 16 maintainers not CCed: willy@infradead.org mhiramat@kernel.org adrian.hunter@intel.com kan.liang@linux.intel.com namhyung@kernel.org david@redhat.com acme@kernel.org jolsa@kernel.org irogers@google.com alexander.shishkin@linux.intel.com linux-trace-kernel@vger.kernel.org mingo@redhat.com mathieu.desnoyers@efficios.com mark.rutland@arm.com rostedt@goodmis.org linux-perf-users@vger.kernel.org
netdev/build_clang success Errors and warnings before: 1026 this patch: 1026
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 14800 this patch: 14802
netdev/checkpatch warning CHECK: No space is necessary after a cast CHECK: Please use a blank line after function/struct/union/enum declarations WARNING: line length of 81 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 5 this patch: 5
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc

Commit Message

Alexei Starovoitov Nov. 16, 2024, 1:48 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

Tracing BPF programs execute from tracepoints and kprobes where running
context is unknown, but they need to request additional memory.
The prior workarounds were using pre-allocated memory and BPF specific
freelists to satisfy such allocation requests. Instead, introduce
__GFP_TRYLOCK flag that makes page allocator accessible from any context.
It relies on percpu free list of pages that rmqueue_pcplist() should be
able to pop the page from. If it fails (due to IRQ re-entrancy or list
being empty) then try_alloc_page() attempts to spin_trylock zone->lock
and refill percpu freelist as normal.
BPF program may execute with IRQs disabled and zone->lock is sleeping in RT,
so trylock is the only option.
In theory we can introduce percpu reentrance counter and increment it
every time spin_lock_irqsave(&zone->lock, flags) is used,
but we cannot rely on it. Even if this cpu is not in page_alloc path
the spin_lock_irqsave() is not safe, since BPF prog might be called
from tracepoint where preemption is disabled. So trylock only.

There is no attempt to make free_page() accessible from any
context (yet). BPF infrastructure will asynchronously free pages from
such contexts.
memcg is also not charged in try_alloc_page() path. It has to be
done asynchronously to avoid sleeping on
local_lock_irqsave(&memcg_stock.stock_lock, flags).

This is a first step towards supporting BPF requirements in SLUB
and getting rid of bpf_mem_alloc.
That goal was discussed at LSFMM: https://lwn.net/Articles/974138/

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 include/linux/gfp.h            | 17 +++++++++++++++++
 include/linux/gfp_types.h      |  3 +++
 include/trace/events/mmflags.h |  1 +
 mm/internal.h                  |  1 +
 mm/page_alloc.c                | 19 ++++++++++++++++---
 tools/perf/builtin-kmem.c      |  1 +
 6 files changed, 39 insertions(+), 3 deletions(-)
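
As an illustration of the intended call pattern, a minimal sketch follows
(the wrapper function and its name are hypothetical; only try_alloc_page()
itself comes from this patch):

/* Hypothetical helper that may run from tracepoint/kprobe context,
 * possibly with IRQs disabled or with locks held.
 */
static void *grab_scratch_page(int nid)
{
	struct page *page;

	/* Never blocks and never spins on zone->lock; may return NULL. */
	page = try_alloc_page(nid);
	if (!page)
		return NULL;

	/* The page comes back zeroed (__GFP_ZERO) and is not memcg-charged;
	 * charging and freeing are deferred to a safe context.
	 */
	return page_address(page);
}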

Comments

Peter Zijlstra Nov. 16, 2024, 7:42 p.m. UTC | #1
On Fri, Nov 15, 2024 at 05:48:53PM -0800, Alexei Starovoitov wrote:
> +static inline struct page *try_alloc_page_noprof(int nid)
> +{
> +	/* If spin_locks are not held and interrupts are enabled, use normal path. */
> +	if (preemptible())
> +		return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);

This isn't right for PREEMPT_RT, spinlock_t will be preemptible, but you
very much do not want regular allocation calls while inside the
allocator itself for example.

> +	/*
> +	 * Best effort allocation from percpu free list.
> +	 * If it's empty attempt to spin_trylock zone->lock.
> +	 * Do not specify __GFP_KSWAPD_RECLAIM to avoid wakeup_kswapd
> +	 * that may need to grab a lock.
> +	 * Do not specify __GFP_ACCOUNT to avoid local_lock.
> +	 * Do not warn either.
> +	 */
> +	return alloc_pages_node_noprof(nid, __GFP_TRYLOCK | __GFP_NOWARN | __GFP_ZERO, 0);
> +}
Alexei Starovoitov Nov. 16, 2024, 9:13 p.m. UTC | #2
On Sat, Nov 16, 2024 at 11:42 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Nov 15, 2024 at 05:48:53PM -0800, Alexei Starovoitov wrote:
> > +static inline struct page *try_alloc_page_noprof(int nid)
> > +{
> > +     /* If spin_locks are not held and interrupts are enabled, use normal path. */
> > +     if (preemptible())
> > +             return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
>
> This isn't right for PREEMPT_RT, spinlock_t will be preemptible, but you
> very much do not want regular allocation calls while inside the
> allocator itself for example.

I'm aware that spinlocks are preemptible in RT.
Here is my understanding of why the above is correct...
- preemptible() means that IRQs are not disabled and preempt_count == 0.

- All page alloc operations are protected either by
pcp_spin_trylock() or by spin_lock_irqsave(&zone->lock, flags)
or both together.

- In non-RT spin_lock_irqsave disables IRQs, so preemptible()
check guarantees that we're not holding zone->lock.
The page alloc logic can hold pcp lock when try_alloc_page() is called,
but it's always using pcp_trylock, so it's still ok to call it
with GFP_NOWAIT. pcp trylock will fail and the allocation will proceed
to acquire zone->lock.

- In RT spin_lock_irqsave doesn't disable IRQs despite its name.
It calls rt_spin_lock() which calls rcu_read_lock()
which increments preempt_count.
So the preemptible() check guards try_alloc_page() from re-entering
zone->lock-protected code.
And pcp_trylock is trylock.

So I believe the above is safe.

> > +     /*
> > +      * Best effort allocation from percpu free list.
> > +      * If it's empty attempt to spin_trylock zone->lock.
> > +      * Do not specify __GFP_KSWAPD_RECLAIM to avoid wakeup_kswapd
> > +      * that may need to grab a lock.
> > +      * Do not specify __GFP_ACCOUNT to avoid local_lock.
> > +      * Do not warn either.
> > +      */
> > +     return alloc_pages_node_noprof(nid, __GFP_TRYLOCK | __GFP_NOWARN | __GFP_ZERO, 0);
> > +}
Alexei Starovoitov Nov. 16, 2024, 9:34 p.m. UTC | #3
On Sat, Nov 16, 2024 at 1:13 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Sat, Nov 16, 2024 at 11:42 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Fri, Nov 15, 2024 at 05:48:53PM -0800, Alexei Starovoitov wrote:
> > > +static inline struct page *try_alloc_page_noprof(int nid)
> > > +{
> > > +     /* If spin_locks are not held and interrupts are enabled, use normal path. */
> > > +     if (preemptible())
> > > +             return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
> >
> > This isn't right for PREEMPT_RT, spinlock_t will be preemptible, but you
> > very much do not want regular allocation calls while inside the
> > allocator itself for example.
>
> I'm aware that spinlocks are preemptible in RT.
> Here is my understanding of why the above is correct...
> - preemptible() means that IRQs are not disabled and preempt_count == 0.
>
> - All page alloc operations are protected either by
> pcp_spin_trylock() or by spin_lock_irqsave(&zone->lock, flags)
> or both together.
>
> - In non-RT spin_lock_irqsave disables IRQs, so preemptible()
> check guarantees that we're not holding zone->lock.
> The page alloc logic can hold pcp lock when try_alloc_page() is called,
> but it's always using pcp_trylock, so it's still ok to call it
> with GFP_NOWAIT. pcp trylock will fail and the allocation will proceed
> to acquire zone->lock.
>
> - In RT spin_lock_irqsave doesn't disable IRQs despite its name.
> It calls rt_spin_lock() which calls rcu_read_lock()
> which increments preempt_count.

The maze of ifdef-s beat me :(
It doesn't increment in PREEMPT_RCU.
Need an additional check then. hmm.
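
For reference, a simplified sketch of the difference (paraphrasing the RCU
read-side implementations, not quoting the exact source):

#ifdef CONFIG_PREEMPT_RCU
static inline void rcu_read_lock_sketch(void)
{
	/* Nesting is tracked per task; preempt_count is untouched, so
	 * preemptible() can still return true inside the read section.
	 * rcu_preempt_depth() reads this counter back.
	 */
	current->rcu_read_lock_nesting++;
}
#else
static inline void rcu_read_lock_sketch(void)
{
	preempt_disable();	/* here preemptible() does become false */
}
#endif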
Alexei Starovoitov Nov. 16, 2024, 9:41 p.m. UTC | #4
On Sat, Nov 16, 2024 at 1:34 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Sat, Nov 16, 2024 at 1:13 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> >
> > On Sat, Nov 16, 2024 at 11:42 AM Peter Zijlstra <peterz@infradead.org> wrote:
> > >
> > > On Fri, Nov 15, 2024 at 05:48:53PM -0800, Alexei Starovoitov wrote:
> > > > +static inline struct page *try_alloc_page_noprof(int nid)
> > > > +{
> > > > +     /* If spin_locks are not held and interrupts are enabled, use normal path. */
> > > > +     if (preemptible())
> > > > +             return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
> > >
> > > This isn't right for PREEMPT_RT, spinlock_t will be preemptible, but you
> > > very much do not want regular allocation calls while inside the
> > > allocator itself for example.
> >
> > I'm aware that spinlocks are preemptible in RT.
> > Here is my understanding of why the above is correct...
> > - preemptible() means that IRQs are not disabled and preempt_count == 0.
> >
> > - All page alloc operations are protected either by
> > pcp_spin_trylock() or by spin_lock_irqsave(&zone->lock, flags)
> > or both together.
> >
> > - In non-RT spin_lock_irqsave disables IRQs, so preemptible()
> > check guarantees that we're not holding zone->lock.
> > The page alloc logic can hold pcp lock when try_alloc_page() is called,
> > but it's always using pcp_trylock, so it's still ok to call it
> > with GFP_NOWAIT. pcp trylock will fail and the allocation will proceed
> > to acquire zone->lock.
> >
> > - In RT spin_lock_irqsave doesn't disable IRQs despite its name.
> > It calls rt_spin_lock() which calls rcu_read_lock()
> > which increments preempt_count.
>
> The maze of ifdef-s beat me :(
> It doesn't increment in PREEMPT_RCU.
> Need an additional check then. hmm.

Like:
if (preemptible() && !rcu_preempt_depth())
  return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);

Not pretty, but should do.
wdyt?
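
Spelled out, the helper would then look roughly like this (a sketch of the
proposed direction, not the final patch):

static inline struct page *try_alloc_page_noprof(int nid)
{
	/* Take the normal path only when neither an IRQs-off/preempt-off
	 * section nor a (possibly rt_spin_lock induced) RCU read section
	 * is active.
	 */
	if (preemptible() && !rcu_preempt_depth())
		return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);

	/* Otherwise fall back to the trylock-only path. */
	return alloc_pages_node_noprof(nid,
			__GFP_TRYLOCK | __GFP_NOWARN | __GFP_ZERO, 0);
}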
Peter Zijlstra Nov. 17, 2024, 10:50 a.m. UTC | #5
On Sat, Nov 16, 2024 at 01:13:20PM -0800, Alexei Starovoitov wrote:
> On Sat, Nov 16, 2024 at 11:42 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Fri, Nov 15, 2024 at 05:48:53PM -0800, Alexei Starovoitov wrote:
> > > +static inline struct page *try_alloc_page_noprof(int nid)
> > > +{
> > > +     /* If spin_locks are not held and interrupts are enabled, use normal path. */
> > > +     if (preemptible())
> > > +             return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
> >
> > This isn't right for PREEMPT_RT, spinlock_t will be preemptible, but you
> > very much do not want regular allocation calls while inside the
> > allocator itself for example.
> 
> I'm aware that spinlocks are preemptible in RT.
> Here is my understanding of why the above is correct...
> - preemptible() means that IRQs are not disabled and preempt_count == 0.
> 
> - All page alloc operations are protected either by
> pcp_spin_trylock() or by spin_lock_irqsave(&zone->lock, flags)
> or both together.
> 
> - In non-RT spin_lock_irqsave disables IRQs, so preemptible()
> check guarantees that we're not holding zone->lock.
> The page alloc logic can hold pcp lock when try_alloc_page() is called,
> but it's always using pcp_trylock, so it's still ok to call it
> with GFP_NOWAIT. pcp trylock will fail and the allocation will proceed
> to acquire zone->lock.
> 
> - In RT spin_lock_irqsave doesn't disable IRQs despite its name.
> It calls rt_spin_lock() which calls rcu_read_lock()
> which increments preempt_count.

It does not on PREEMPT_RCU, which is mandatory for PREEMPT_RT.
Peter Zijlstra Nov. 17, 2024, 10:54 a.m. UTC | #6
On Sat, Nov 16, 2024 at 01:41:41PM -0800, Alexei Starovoitov wrote:

> > The maze of ifdef-s beat me :(
> > It doesn't increment in PREEMPT_RCU.
> > Need an additional check then. hmm.
> 
> Like:
> if (preemptible() && !rcu_preempt_depth())
>   return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
> 
> Not pretty, but should do.

Yeah, I suppose that should work. Thanks!
Vlastimil Babka Nov. 17, 2024, 10:54 a.m. UTC | #7
On 11/16/24 02:48, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Tracing BPF programs execute from tracepoints and kprobes where running
> context is unknown, but they need to request additional memory.
> The prior workarounds were using pre-allocated memory and BPF specific
> freelists to satisfy such allocation requests. Instead, introduce
> __GFP_TRYLOCK flag that makes page allocator accessible from any context.
> It relies on percpu free list of pages that rmqueue_pcplist() should be
> able to pop the page from. If it fails (due to IRQ re-entrancy or list
> being empty) then try_alloc_page() attempts to spin_trylock zone->lock
> and refill percpu freelist as normal.
> BPF program may execute with IRQs disabled and zone->lock is sleeping in RT,
> so trylock is the only option.
> In theory we can introduce percpu reentrance counter and increment it
> every time spin_lock_irqsave(&zone->lock, flags) is used,
> but we cannot rely on it. Even if this cpu is not in page_alloc path
> the spin_lock_irqsave() is not safe, since BPF prog might be called
> from tracepoint where preemption is disabled. So trylock only.
> 
> > There is no attempt to make free_page() accessible from any
> context (yet). BPF infrastructure will asynchronously free pages from
> such contexts.
> memcg is also not charged in try_alloc_page() path. It has to be
> done asynchronously to avoid sleeping on
> local_lock_irqsave(&memcg_stock.stock_lock, flags).
> 
> This is a first step towards supporting BPF requirements in SLUB
> and getting rid of bpf_mem_alloc.
> That goal was discussed at LSFMM: https://lwn.net/Articles/974138/

Thanks for looking into this. I agree that something like __GFP_TRYLOCK
would be necessary to distinguish those allocation contexts. But I'm
wondering if the page allocator is the best place to start (or even
necessary in the end) if the goal is to replace the kmalloc-sized
allocations in bpf and not page sized? SLUB could have preallocated slab
pages to not call into the page allocator in such situations?

I've posted the first SLUB sheaves RFC this week [1]. The immediate
motivation was different, but I did mention there this could perhaps become
a basis for the bpf_mem_alloc replacement too. I'd imagine something like a
set of kmem_buckets with sheaves enabled and either a flag like
__GFP_TRYLOCK or a different variant of kmalloc() to only try using the
sheaves. Didn't Cc you/bpf as it seemed too vague yet, but guess I should have.

[1]
https://lore.kernel.org/all/20241112-slub-percpu-caches-v1-0-ddc0bdc27e05@suse.cz/#t

> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  include/linux/gfp.h            | 17 +++++++++++++++++
>  include/linux/gfp_types.h      |  3 +++
>  include/trace/events/mmflags.h |  1 +
>  mm/internal.h                  |  1 +
>  mm/page_alloc.c                | 19 ++++++++++++++++---
>  tools/perf/builtin-kmem.c      |  1 +
>  6 files changed, 39 insertions(+), 3 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index a951de920e20..319d8906ef3f 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -347,6 +347,23 @@ static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
>  }
>  #define alloc_page_vma(...)			alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
>  
> +static inline struct page *try_alloc_page_noprof(int nid)
> +{
> +	/* If spin_locks are not held and interrupts are enabled, use normal path. */
> +	if (preemptible())
> +		return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
> +	/*
> +	 * Best effort allocation from percpu free list.
> +	 * If it's empty attempt to spin_trylock zone->lock.
> +	 * Do not specify __GFP_KSWAPD_RECLAIM to avoid wakeup_kswapd
> +	 * that may need to grab a lock.
> +	 * Do not specify __GFP_ACCOUNT to avoid local_lock.
> +	 * Do not warn either.
> +	 */
> +	return alloc_pages_node_noprof(nid, __GFP_TRYLOCK | __GFP_NOWARN | __GFP_ZERO, 0);
> +}
> +#define try_alloc_page(nid)			alloc_hooks(try_alloc_page_noprof(nid))
> +
>  extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
>  #define __get_free_pages(...)			alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
>  
> diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
> index 65db9349f905..72b385a7888d 100644
> --- a/include/linux/gfp_types.h
> +++ b/include/linux/gfp_types.h
> @@ -48,6 +48,7 @@ enum {
>  	___GFP_THISNODE_BIT,
>  	___GFP_ACCOUNT_BIT,
>  	___GFP_ZEROTAGS_BIT,
> +	___GFP_TRYLOCK_BIT,
>  #ifdef CONFIG_KASAN_HW_TAGS
>  	___GFP_SKIP_ZERO_BIT,
>  	___GFP_SKIP_KASAN_BIT,
> @@ -86,6 +87,7 @@ enum {
>  #define ___GFP_THISNODE		BIT(___GFP_THISNODE_BIT)
>  #define ___GFP_ACCOUNT		BIT(___GFP_ACCOUNT_BIT)
>  #define ___GFP_ZEROTAGS		BIT(___GFP_ZEROTAGS_BIT)
> +#define ___GFP_TRYLOCK		BIT(___GFP_TRYLOCK_BIT)
>  #ifdef CONFIG_KASAN_HW_TAGS
>  #define ___GFP_SKIP_ZERO	BIT(___GFP_SKIP_ZERO_BIT)
>  #define ___GFP_SKIP_KASAN	BIT(___GFP_SKIP_KASAN_BIT)
> @@ -293,6 +295,7 @@ enum {
>  #define __GFP_COMP	((__force gfp_t)___GFP_COMP)
>  #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
>  #define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
> +#define __GFP_TRYLOCK	((__force gfp_t)___GFP_TRYLOCK)
>  #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
>  #define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
>  
> diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
> index bb8a59c6caa2..592c93ee5f35 100644
> --- a/include/trace/events/mmflags.h
> +++ b/include/trace/events/mmflags.h
> @@ -50,6 +50,7 @@
>  	gfpflag_string(__GFP_RECLAIM),		\
>  	gfpflag_string(__GFP_DIRECT_RECLAIM),	\
>  	gfpflag_string(__GFP_KSWAPD_RECLAIM),	\
> +	gfpflag_string(__GFP_TRYLOCK),		\
>  	gfpflag_string(__GFP_ZEROTAGS)
>  
>  #ifdef CONFIG_KASAN_HW_TAGS
> diff --git a/mm/internal.h b/mm/internal.h
> index 64c2eb0b160e..c1b08e95a63b 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1173,6 +1173,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
>  #endif
>  #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
>  #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
> +#define ALLOC_TRYLOCK		0x1000000 /* Only use spin_trylock in allocation path */
>  
>  /* Flags that allow allocations below the min watermark. */
>  #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 216fbbfbedcf..71fed4f5bd0c 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2304,7 +2304,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
>  	unsigned long flags;
>  	int i;
>  
> -	spin_lock_irqsave(&zone->lock, flags);
> +	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +		if (!spin_trylock_irqsave(&zone->lock, flags))
> +			return 0;
> +	} else {
> +		spin_lock_irqsave(&zone->lock, flags);
> +	}
>  	for (i = 0; i < count; ++i) {
>  		struct page *page = __rmqueue(zone, order, migratetype,
>  								alloc_flags);
> @@ -2904,7 +2909,12 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
>  
>  	do {
>  		page = NULL;
> -		spin_lock_irqsave(&zone->lock, flags);
> +		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +			if (!spin_trylock_irqsave(&zone->lock, flags))
> +				return 0;
> +		} else {
> +			spin_lock_irqsave(&zone->lock, flags);
> +		}
>  		if (alloc_flags & ALLOC_HIGHATOMIC)
>  			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
>  		if (!page) {
> @@ -4001,6 +4011,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
>  	 */
>  	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
>  	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
> +	BUILD_BUG_ON(__GFP_TRYLOCK != (__force gfp_t) ALLOC_TRYLOCK);
>  
>  	/*
>  	 * The caller may dip into page reserves a bit more if the caller
> @@ -4009,7 +4020,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
>  	 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
>  	 */
>  	alloc_flags |= (__force int)
> -		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
> +		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM | __GFP_TRYLOCK));
>  
>  	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
>  		/*
> @@ -4509,6 +4520,8 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>  
>  	might_alloc(gfp_mask);
>  
> +	*alloc_flags |= (__force int) (gfp_mask & __GFP_TRYLOCK);
> +
>  	if (should_fail_alloc_page(gfp_mask, order))
>  		return false;
>  
> diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
> index a756147e2eec..d245ff60d2a6 100644
> --- a/tools/perf/builtin-kmem.c
> +++ b/tools/perf/builtin-kmem.c
> @@ -682,6 +682,7 @@ static const struct {
>  	{ "__GFP_RECLAIM",		"R" },
>  	{ "__GFP_DIRECT_RECLAIM",	"DR" },
>  	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
> +	{ "__GFP_TRYLOCK",		"TL" },
>  };
>  
>  static size_t max_gfp_len;
Alexei Starovoitov Nov. 19, 2024, 12:40 a.m. UTC | #8
On Sun, Nov 17, 2024 at 2:54 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 11/16/24 02:48, Alexei Starovoitov wrote:
> > From: Alexei Starovoitov <ast@kernel.org>
> >
> > Tracing BPF programs execute from tracepoints and kprobes where running
> > context is unknown, but they need to request additional memory.
> > The prior workarounds were using pre-allocated memory and BPF specific
> > freelists to satisfy such allocation requests. Instead, introduce
> > __GFP_TRYLOCK flag that makes page allocator accessible from any context.
> > It relies on percpu free list of pages that rmqueue_pcplist() should be
> > able to pop the page from. If it fails (due to IRQ re-entrancy or list
> > being empty) then try_alloc_page() attempts to spin_trylock zone->lock
> > and refill percpu freelist as normal.
> > BPF program may execute with IRQs disabled and zone->lock is sleeping in RT,
> > so trylock is the only option.
> > In theory we can introduce percpu reentrance counter and increment it
> > every time spin_lock_irqsave(&zone->lock, flags) is used,
> > but we cannot rely on it. Even if this cpu is not in page_alloc path
> > the spin_lock_irqsave() is not safe, since BPF prog might be called
> > from tracepoint where preemption is disabled. So trylock only.
> >
> > > There is no attempt to make free_page() accessible from any
> > context (yet). BPF infrastructure will asynchronously free pages from
> > such contexts.
> > memcg is also not charged in try_alloc_page() path. It has to be
> > done asynchronously to avoid sleeping on
> > local_lock_irqsave(&memcg_stock.stock_lock, flags).
> >
> > This is a first step towards supporting BPF requirements in SLUB
> > and getting rid of bpf_mem_alloc.
> > That goal was discussed at LSFMM: https://lwn.net/Articles/974138/
>
> Thanks for looking into this. I agree that something like __GFP_TRYLOCK
> would be necessary to distinguish those allocation contexts. But I'm
> wondering if the page allocator is the best place to start (or even
> necessary in the end)

It's necessary. More below.

> if the goal is to replace the kmalloc-sized
> allocations in bpf and not page sized?

bpf_ma has support for page sized allocs, but a freelist of one page
isn't practical. The bpf side needs to be able to request many
pages one page at a time. There is no need for order >= 1 allocs
and never going to be. One page at a time only.

> SLUB could have preallocated slab
> pages to not call into the page allocator in such situations?

prealloc cannot be sized. In the early days of bpf we preallocated
everything. When bpf map is created it has 'max_entries' limit.
So we know how much to preallocate, but since maps got big
(in networking use cases there are multiple multi-gigabyte maps)
the full prealloc is a waste of memory. Hence bpf_ma was added to
allocate when necessary. So far it's working ok for
small allocations and free list watermark logic seems to be enough.
But it doesn't work for page sized allocs.
So for bpf arena we require sleepable context to populate pages
and users are already complaining that we merely shifted the problem
onto them. Now bpf progs have to preallocate in sleepable context,
stash memory somewhere and use it later.
And the program author cannot know beforehand how much to preallocate.
How many flows will the network load balancer see?

> I've posted the first SLUB sheaves RFC this week [1]. The immediate
> motivation was different, but I did mention there this could perhaps become
> a basis for the bpf_mem_alloc replacement too. I'd imagine something like a
> set of kmem_buckets with sheaves enabled and either a flag like
> __GFP_TRYLOCK or a different variant of kmalloc() to only try using the
> sheaves. Didn't Cc you/bpf as it seemed too vague yet, but guess I should have.
>
> [1]
> https://lore.kernel.org/all/20241112-slub-percpu-caches-v1-0-ddc0bdc27e05@suse.cz/#t

Thanks for the link.
I see why you think this cache array may help maple_tree
(though I'm sceptical without seeing performance numbers),
but I don't see how it helps bpf.
bpf_ma is not about performance. We tried to make it fast,
but it wasn't a goal. The main objective is to have a reasonable
chance of getting memory in any context.
afaict sheaves don't help bpf_ma.
In the patch
[PATCH RFC 5/6] mm, slub: cheaper locking for percpu sheaves
you're calling it cpu_sheaves_trylock(),
but then you do:
local_lock(&ptr->member.llock);
how is this a try lock?

This lock breaks slab re-entrancy and lockdep will complain
if it sees local_lock() while a raw spinlock is already held.
So the way sheaves are coded they're a no-go for bpf use cases.

In this GFP_TRYLOCK patch see this comment:
+  * Do not specify __GFP_ACCOUNT to avoid local_lock.

That's an important part. No locks, including local_locks,
should be taken with GFP_TRYLOCK.

The plan to replace bpf_ma has these rough steps:
1. try_alloc_page_noprof() that uses trylock only. This patch.
2. kmalloc() support for GFP_TRYLOCK that does its
double cmpxchg and when new_slab() is needed it's called with
TRYLOCK too.
So all slub and page allocs are not blocking.
The __free_pages() part is a bit tricky, since it needs
to add a page to the free list, but it has to use trylock and
there are no gfp flags. I'm thinking of using an llist when trylock
fails and truly adding the page to the freelist on the next
call to free_pages() that doesn't have the TRYLOCK flag.
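
One way the deferred-free side could be sketched (purely illustrative;
the names deferred_pages, try_free_page_sketch and flush_deferred_pages
are invented here and are not part of this patch or any existing API):

#include <linux/llist.h>
#include <linux/mm.h>

static LLIST_HEAD(deferred_pages);

static void try_free_page_sketch(struct page *page)
{
	/* Reuse the page itself to hold the llist node; llist_add() is
	 * lock-free, so this is safe from any context.
	 */
	struct llist_node *node = page_address(page);

	llist_add(node, &deferred_pages);
}

/* Called later from a context where taking zone->lock is fine. */
static void flush_deferred_pages(void)
{
	struct llist_node *node = llist_del_all(&deferred_pages);

	while (node) {
		struct llist_node *next = node->next;

		__free_page(virt_to_page(node));
		node = next;
	}
}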

Patch

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a951de920e20..319d8906ef3f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -347,6 +347,23 @@  static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
 }
 #define alloc_page_vma(...)			alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
 
+static inline struct page *try_alloc_page_noprof(int nid)
+{
+	/* If spin_locks are not held and interrupts are enabled, use normal path. */
+	if (preemptible())
+		return alloc_pages_node_noprof(nid, GFP_NOWAIT | __GFP_ZERO, 0);
+	/*
+	 * Best effort allocation from percpu free list.
+	 * If it's empty attempt to spin_trylock zone->lock.
+	 * Do not specify __GFP_KSWAPD_RECLAIM to avoid wakeup_kswapd
+	 * that may need to grab a lock.
+	 * Do not specify __GFP_ACCOUNT to avoid local_lock.
+	 * Do not warn either.
+	 */
+	return alloc_pages_node_noprof(nid, __GFP_TRYLOCK | __GFP_NOWARN | __GFP_ZERO, 0);
+}
+#define try_alloc_page(nid)			alloc_hooks(try_alloc_page_noprof(nid))
+
 extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
 #define __get_free_pages(...)			alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
 
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 65db9349f905..72b385a7888d 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -48,6 +48,7 @@  enum {
 	___GFP_THISNODE_BIT,
 	___GFP_ACCOUNT_BIT,
 	___GFP_ZEROTAGS_BIT,
+	___GFP_TRYLOCK_BIT,
 #ifdef CONFIG_KASAN_HW_TAGS
 	___GFP_SKIP_ZERO_BIT,
 	___GFP_SKIP_KASAN_BIT,
@@ -86,6 +87,7 @@  enum {
 #define ___GFP_THISNODE		BIT(___GFP_THISNODE_BIT)
 #define ___GFP_ACCOUNT		BIT(___GFP_ACCOUNT_BIT)
 #define ___GFP_ZEROTAGS		BIT(___GFP_ZEROTAGS_BIT)
+#define ___GFP_TRYLOCK		BIT(___GFP_TRYLOCK_BIT)
 #ifdef CONFIG_KASAN_HW_TAGS
 #define ___GFP_SKIP_ZERO	BIT(___GFP_SKIP_ZERO_BIT)
 #define ___GFP_SKIP_KASAN	BIT(___GFP_SKIP_KASAN_BIT)
@@ -293,6 +295,7 @@  enum {
 #define __GFP_COMP	((__force gfp_t)___GFP_COMP)
 #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
 #define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_TRYLOCK	((__force gfp_t)___GFP_TRYLOCK)
 #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
 #define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
 
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index bb8a59c6caa2..592c93ee5f35 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -50,6 +50,7 @@ 
 	gfpflag_string(__GFP_RECLAIM),		\
 	gfpflag_string(__GFP_DIRECT_RECLAIM),	\
 	gfpflag_string(__GFP_KSWAPD_RECLAIM),	\
+	gfpflag_string(__GFP_TRYLOCK),		\
 	gfpflag_string(__GFP_ZEROTAGS)
 
 #ifdef CONFIG_KASAN_HW_TAGS
diff --git a/mm/internal.h b/mm/internal.h
index 64c2eb0b160e..c1b08e95a63b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1173,6 +1173,7 @@  unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #endif
 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
+#define ALLOC_TRYLOCK		0x1000000 /* Only use spin_trylock in allocation path */
 
 /* Flags that allow allocations below the min watermark. */
 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 216fbbfbedcf..71fed4f5bd0c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2304,7 +2304,12 @@  static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags))
+			return 0;
+	} else {
+		spin_lock_irqsave(&zone->lock, flags);
+	}
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype,
 								alloc_flags);
@@ -2904,7 +2909,12 @@  struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 
 	do {
 		page = NULL;
-		spin_lock_irqsave(&zone->lock, flags);
+		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+			if (!spin_trylock_irqsave(&zone->lock, flags))
+				return 0;
+		} else {
+			spin_lock_irqsave(&zone->lock, flags);
+		}
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
@@ -4001,6 +4011,7 @@  gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 	 */
 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
+	BUILD_BUG_ON(__GFP_TRYLOCK != (__force gfp_t) ALLOC_TRYLOCK);
 
 	/*
 	 * The caller may dip into page reserves a bit more if the caller
@@ -4009,7 +4020,7 @@  gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 	 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
 	 */
 	alloc_flags |= (__force int)
-		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
+		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM | __GFP_TRYLOCK));
 
 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
 		/*
@@ -4509,6 +4520,8 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 
 	might_alloc(gfp_mask);
 
+	*alloc_flags |= (__force int) (gfp_mask & __GFP_TRYLOCK);
+
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index a756147e2eec..d245ff60d2a6 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -682,6 +682,7 @@  static const struct {
 	{ "__GFP_RECLAIM",		"R" },
 	{ "__GFP_DIRECT_RECLAIM",	"DR" },
 	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
+	{ "__GFP_TRYLOCK",		"TL" },
 };
 
 static size_t max_gfp_len;