
[bpf-next,v5,5/7] mm, bpf: Use memcg in try_alloc_pages().

Message ID 20250115021746.34691-6-alexei.starovoitov@gmail.com (mailing list archive)
State New
Series: bpf, mm: Introduce try_alloc_pages()

Commit Message

Alexei Starovoitov Jan. 15, 2025, 2:17 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

Unconditionally use __GFP_ACCOUNT in try_alloc_pages().
The caller is responsible for setting up memcg correctly.
All BPF memory accounting is memcg-based.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 mm/page_alloc.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
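
A minimal sketch of what a caller might look like, to go with the "caller is
responsible for setting up memcg" note above. set_active_memcg() is the
existing mainline interface for pinning the charge target; alloc_page_charged()
and its parameters are made up here for illustration and are not part of the
series:

#include <linux/gfp.h>
#include <linux/memcontrol.h>
#include <linux/sched/mm.h>

/*
 * Illustrative only, not part of this patch. The caller is assumed to
 * already hold a reference on @memcg (e.g. a BPF map's memcg). Since
 * try_alloc_pages() now passes __GFP_ACCOUNT unconditionally, the page
 * is charged to whichever memcg is active at the time of the call; if
 * the charge fails, the page is freed internally and NULL is returned.
 */
static struct page *alloc_page_charged(struct mem_cgroup *memcg, int nid)
{
	struct mem_cgroup *old_memcg;
	struct page *page;

	old_memcg = set_active_memcg(memcg);
	page = try_alloc_pages(nid, 0);
	set_active_memcg(old_memcg);

	return page;
}

This mirrors the pattern BPF already uses around its other __GFP_ACCOUNT
allocations: set_active_memcg() before the allocation, restore afterwards.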

Comments

Vlastimil Babka Jan. 15, 2025, 5:51 p.m. UTC | #1
On 1/15/25 03:17, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Unconditionally use __GFP_ACCOUNT in try_alloc_pages().
> The caller is responsible for setting up memcg correctly.
> All BPF memory accounting is memcg-based.
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
>  mm/page_alloc.c | 8 +++++++-
>  1 file changed, 7 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index a9c639e3db91..c87fd6cc3909 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -7131,7 +7131,8 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
>  	 * specify it here to highlight that try_alloc_pages()
>  	 * doesn't want to deplete reserves.
>  	 */
> -	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC;
> +	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC
> +			| __GFP_ACCOUNT;
>  	unsigned int alloc_flags = ALLOC_TRYLOCK;
>  	struct alloc_context ac = { };
>  	struct page *page;
> @@ -7174,6 +7175,11 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
>  
>  	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
>  
> +	if (memcg_kmem_online() && page &&
> +	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
> +		free_pages_nolock(page, order);
> +		page = NULL;
> +	}
>  	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
>  	kmsan_alloc_page(page, order, alloc_gfp);
>  	return page;
Shakeel Butt Jan. 16, 2025, 12:24 a.m. UTC | #2
On Tue, Jan 14, 2025 at 06:17:44PM -0800, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Unconditionally use __GFP_ACCOUNT in try_alloc_pages().
> The caller is responsible for setting up memcg correctly.
> All BPF memory accounting is memcg-based.
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Acked-by: Shakeel Butt <shakeel.butt@linux.dev>

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9c639e3db91..c87fd6cc3909 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7131,7 +7131,8 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
 	 * specify it here to highlight that try_alloc_pages()
 	 * doesn't want to deplete reserves.
 	 */
-	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC;
+	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC
+			| __GFP_ACCOUNT;
 	unsigned int alloc_flags = ALLOC_TRYLOCK;
 	struct alloc_context ac = { };
 	struct page *page;
@@ -7174,6 +7175,11 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
 
 	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
 
+	if (memcg_kmem_online() && page &&
+	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
+		free_pages_nolock(page, order);
+		page = NULL;
+	}
 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 	kmsan_alloc_page(page, order, alloc_gfp);
 	return page;
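
For readability, the touched parts of try_alloc_pages_noprof() as they read
with both hunks applied (reconstructed from the diff above; the unchanged
middle of the function is elided):

struct page *try_alloc_pages_noprof(int nid, unsigned int order)
{
	/*
	 * __GFP_NOMEMALLOC highlights that try_alloc_pages() doesn't want
	 * to deplete reserves; __GFP_ACCOUNT is now added unconditionally.
	 */
	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC
			| __GFP_ACCOUNT;
	unsigned int alloc_flags = ALLOC_TRYLOCK;
	struct alloc_context ac = { };
	struct page *page;

	/* ... trylock fast-path allocation elided ... */

	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */

	/* Charge the page to the active memcg; on failure, give it back. */
	if (memcg_kmem_online() && page &&
	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
		free_pages_nolock(page, order);
		page = NULL;
	}
	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
	kmsan_alloc_page(page, order, alloc_gfp);
	return page;
}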