
[bpf] bpf: Fix release of page_pool in BPF_PROG_RUN

Message ID 20220409213053.3117305-1-toke@redhat.com
State Accepted
Commit 425d239379db03d514cb1c476bfe7c320bb89dfc
Delegated to: BPF
Series [bpf] bpf: Fix release of page_pool in BPF_PROG_RUN

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf
netdev/fixes_present success Fixes tag present in non-next series
netdev/subject_prefix success Link
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 26 this patch: 26
netdev/cc_maintainers success CCed 15 of 15 maintainers
netdev/build_clang success Errors and warnings before: 9 this patch: 9
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 26 this patch: 26
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 29 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-PR success PR summary
bpf/vmtest-bpf-VM_Test-1 success Logs for Kernel LATEST on ubuntu-latest + selftests
bpf/vmtest-bpf-VM_Test-2 success Logs for Kernel LATEST on z15 + selftests

Commit Message

Toke Høiland-Jørgensen April 9, 2022, 9:30 p.m. UTC
The live packet mode in BPF_PROG_RUN allocates a page_pool instance for
each test run instance and uses it for the packet data. On setup it creates
the page_pool, and calls xdp_reg_mem_model() to allow pages to be returned
properly from the XDP data path. However, xdp_reg_mem_model() also raises
the reference count of the page_pool itself, so the single
page_pool_destroy() count on teardown was not enough to actually release
the pool. To fix this, add an additional xdp_unreg_mem_model() call on
teardown.

Fixes: b530e9e1063e ("bpf: Add "live packet" mode for XDP in BPF_PROG_RUN")
Reported-by: Freysteinn Alfredsson <freysteinn.alfredsson@kau.se>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
---
 net/bpf/test_run.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
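For readers less familiar with the refcounting involved, below is a minimal, self-contained sketch (not part of the patch) of the balanced setup/teardown the fix restores. The pp_ctx struct, the two helper functions and the pool parameters are purely illustrative; page_pool_create(), xdp_reg_mem_model(), xdp_unreg_mem_model() and page_pool_destroy() are the kernel APIs actually used in net/bpf/test_run.c.

/* Sketch only: pairs every xdp_reg_mem_model() with an xdp_unreg_mem_model()
 * so that the final page_pool_destroy() can drop the last reference to the
 * pool.
 */
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool.h>
#include <net/xdp.h>

struct pp_ctx {
	struct page_pool *pp;
	struct xdp_mem_info mem;	/* must stay around until teardown */
};

static int pp_ctx_setup(struct pp_ctx *ctx)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= 256,		/* illustrative size */
		.nid		= NUMA_NO_NODE,
	};
	int err;

	ctx->pp = page_pool_create(&pp_params);
	if (IS_ERR(ctx->pp))
		return PTR_ERR(ctx->pp);

	/* Register the pool as an XDP memory model; this also takes an
	 * extra reference on the pool and copies mem.id into pp->xdp_mem_id.
	 */
	err = xdp_reg_mem_model(&ctx->mem, MEM_TYPE_PAGE_POOL, ctx->pp);
	if (err) {
		page_pool_destroy(ctx->pp);
		return err;
	}
	return 0;
}

static void pp_ctx_teardown(struct pp_ctx *ctx)
{
	/* Drop the reference taken by xdp_reg_mem_model(); without this,
	 * the page_pool_destroy() below cannot actually free the pool.
	 */
	xdp_unreg_mem_model(&ctx->mem);
	page_pool_destroy(ctx->pp);
}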

Comments

Song Liu April 11, 2022, 5:19 a.m. UTC | #1
On Sat, Apr 9, 2022 at 2:31 PM Toke Høiland-Jørgensen <toke@redhat.com> wrote:
>
> The live packet mode in BPF_PROG_RUN allocates a page_pool instance for
> each test run instance and uses it for the packet data. On setup it creates
> the page_pool, and calls xdp_reg_mem_model() to allow pages to be returned
> properly from the XDP data path. However, xdp_reg_mem_model() also raises
> the reference count of the page_pool itself, so the single
> page_pool_destroy() count on teardown was not enough to actually release
> the pool. To fix this, add an additional xdp_unreg_mem_model() call on
> teardown.
>
> Fixes: b530e9e1063e ("bpf: Add "live packet" mode for XDP in BPF_PROG_RUN")
> Reported-by: Freysteinn Alfredsson <freysteinn.alfredsson@kau.se>
> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>

Acked-by: Song Liu <songliubraving@fb.com>

> ---
>  net/bpf/test_run.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
>
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index e7b9c2636d10..af709c182674 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -108,6 +108,7 @@ struct xdp_test_data {
>         struct page_pool *pp;
>         struct xdp_frame **frames;
>         struct sk_buff **skbs;
> +       struct xdp_mem_info mem;
>         u32 batch_size;
>         u32 frame_cnt;
>  };
> @@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
>
>  static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
>  {
> -       struct xdp_mem_info mem = {};
>         struct page_pool *pp;
>         int err = -ENOMEM;
>         struct page_pool_params pp_params = {
> @@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c
>         }
>
>         /* will copy 'mem.id' into pp->xdp_mem_id */
> -       err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
> +       err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
>         if (err)
>                 goto err_mmodel;
>
> @@ -202,6 +202,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c
>
>  static void xdp_test_run_teardown(struct xdp_test_data *xdp)
>  {
> +       xdp_unreg_mem_model(&xdp->mem);
>         page_pool_destroy(xdp->pp);
>         kfree(xdp->frames);
>         kfree(xdp->skbs);
> --
> 2.35.1
>
patchwork-bot+netdevbpf@kernel.org April 11, 2022, 3:40 p.m. UTC | #2
Hello:

This patch was applied to bpf/bpf.git (master)
by Daniel Borkmann <daniel@iogearbox.net>:

On Sat,  9 Apr 2022 23:30:53 +0200 you wrote:
> The live packet mode in BPF_PROG_RUN allocates a page_pool instance for
> each test run instance and uses it for the packet data. On setup it creates
> the page_pool, and calls xdp_reg_mem_model() to allow pages to be returned
> properly from the XDP data path. However, xdp_reg_mem_model() also raises
> the reference count of the page_pool itself, so the single
> page_pool_destroy() count on teardown was not enough to actually release
> the pool. To fix this, add an additional xdp_unreg_mem_model() call on
> teardown.
> 
> [...]

Here is the summary with links:
  - [bpf] bpf: Fix release of page_pool in BPF_PROG_RUN
    https://git.kernel.org/bpf/bpf/c/425d239379db

You are awesome, thank you!

Patch

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index e7b9c2636d10..af709c182674 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -108,6 +108,7 @@ struct xdp_test_data {
 	struct page_pool *pp;
 	struct xdp_frame **frames;
 	struct sk_buff **skbs;
+	struct xdp_mem_info mem;
 	u32 batch_size;
 	u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
 
 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-	struct xdp_mem_info mem = {};
 	struct page_pool *pp;
 	int err = -ENOMEM;
 	struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c
 	}
 
 	/* will copy 'mem.id' into pp->xdp_mem_id */
-	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
 	if (err)
 		goto err_mmodel;
 
@@ -202,6 +202,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c
 
 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+	xdp_unreg_mem_model(&xdp->mem);
 	page_pool_destroy(xdp->pp);
 	kfree(xdp->frames);
 	kfree(xdp->skbs);