diff mbox series

[bpf-next,v2,6/6] selftests/bpf: Cope with 512 bytes limit with bpf_global_percpu_ma

Message ID 20231215001227.3254314-1-yonghong.song@linux.dev (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf: Reduce memory usage for bpf_global_percpu_ma | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-43 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-44 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-45 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-46 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-47 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-48 pending Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-49 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-50 pending Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-51 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-52 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-53 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-54 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-55 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-56 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-57 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-13 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / veristat
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success SINGLE THREAD; Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/cc_maintainers warning 11 maintainers not CCed: kpsingh@kernel.org sdf@google.com martin.lau@linux.dev mykolal@fb.com song@kernel.org houtao1@huawei.com haoluo@google.com jolsa@kernel.org linux-kselftest@vger.kernel.org shuah@kernel.org john.fastabend@gmail.com
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning CHECK: spaces preferred around that '*' (ctx:WxV)
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yonghong Song Dec. 15, 2023, 12:12 a.m. UTC
In the previous patch, the maximum data size for bpf_global_percpu_ma
is 512 bytes. This breaks selftest test_bpf_ma. Let us adjust it
accordingly. Also added a selftest to capture the verification failure
when the allocation size is greater than 512.

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
 .../selftests/bpf/progs/percpu_alloc_fail.c    | 18 ++++++++++++++++++
 .../testing/selftests/bpf/progs/test_bpf_ma.c  |  9 ---------
 2 files changed, 18 insertions(+), 9 deletions(-)

Comments

Hou Tao Dec. 15, 2023, 3:33 a.m. UTC | #1
Hi,

On 12/15/2023 8:12 AM, Yonghong Song wrote:
> In the previous patch, the maximum data size for bpf_global_percpu_ma
> is 512 bytes. This breaks selftest test_bpf_ma. Let us adjust it
> accordingly. Also added a selftest to capture the verification failure
> when the allocation size is greater than 512.
>
> Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
> ---
>  .../selftests/bpf/progs/percpu_alloc_fail.c    | 18 ++++++++++++++++++
>  .../testing/selftests/bpf/progs/test_bpf_ma.c  |  9 ---------
>  2 files changed, 18 insertions(+), 9 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
> index 1a891d30f1fe..f2b8eb2ff76f 100644
> --- a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
> +++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
> @@ -17,6 +17,10 @@ struct val_with_rb_root_t {
>  	struct bpf_spin_lock lock;
>  };
>  
> +struct val_600b_t {
> +	char b[600];
> +};
> +
>  struct elem {
>  	long sum;
>  	struct val_t __percpu_kptr *pc;
> @@ -161,4 +165,18 @@ int BPF_PROG(test_array_map_7)
>  	return 0;
>  }
>  
> +SEC("?fentry.s/bpf_fentry_test1")
> +__failure __msg("bpf_percpu_obj_new type size (600) is greater than 512")
> +int BPF_PROG(test_array_map_8)
> +{
> +	struct val_600b_t __percpu_kptr *p;
> +
> +	p = bpf_percpu_obj_new(struct val_600b_t);
> +	if (!p)
> +		return 0;
> +
> +	bpf_percpu_obj_drop(p);
> +	return 0;
> +}
> +
>  char _license[] SEC("license") = "GPL";
> diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
> index b685a4aba6bd..68cba55eb828 100644
> --- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
> +++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
> @@ -188,9 +188,6 @@ DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
>  DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
>  DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
>  DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
> -DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
> -DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
> -DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);

Considering the update in patch "bpf: Avoid unnecessary extra percpu
memory allocation", the definition of DEFINE_ARRAY_WITH_PERCPU_KPTR()
needs update as well, because for 512-sized per-cpu kptr, the tests only
allocate for (512 - sizeof(void *)) bytes. And we could do
DEFINE_ARRAY_WITH_PERCPU_KPTR(8) test after the update. I could do that
after the patch-set is landed if you don't have time to do that.

A bit of off-topic, but it is still relevant. I have a question about
how to forcibly generate BTF info for struct definition in the test?
Currently, I have to include  bin_data_xx in the definition of
map_value, but I don't want to increase the size of map_value. I had
tried to use BTF_TYPE_EMIT() in prog just like in linux kernel, but it
didn't work.
>  
>  SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
>  int test_batch_alloc_free(void *ctx)
> @@ -259,9 +256,6 @@ int test_batch_percpu_alloc_free(void *ctx)
>  	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
>  	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
>  	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
> -	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
> -	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
> -	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
>  
>  	return 0;
>  }
> @@ -283,9 +277,6 @@ int test_percpu_free_through_map_free(void *ctx)
>  	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
>  	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
>  	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
> -	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
> -	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
> -	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
>  
>  	return 0;
>  }
Yonghong Song Dec. 15, 2023, 7:38 a.m. UTC | #2
On 12/14/23 7:33 PM, Hou Tao wrote:
> Hi,
>
> On 12/15/2023 8:12 AM, Yonghong Song wrote:
>> In the previous patch, the maximum data size for bpf_global_percpu_ma
>> is 512 bytes. This breaks selftest test_bpf_ma. Let us adjust it
>> accordingly. Also added a selftest to capture the verification failure
>> when the allocation size is greater than 512.
>>
>> Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
>> ---
>>   .../selftests/bpf/progs/percpu_alloc_fail.c    | 18 ++++++++++++++++++
>>   .../testing/selftests/bpf/progs/test_bpf_ma.c  |  9 ---------
>>   2 files changed, 18 insertions(+), 9 deletions(-)
>>
>> diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
>> index 1a891d30f1fe..f2b8eb2ff76f 100644
>> --- a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
>> +++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
>> @@ -17,6 +17,10 @@ struct val_with_rb_root_t {
>>   	struct bpf_spin_lock lock;
>>   };
>>   
>> +struct val_600b_t {
>> +	char b[600];
>> +};
>> +
>>   struct elem {
>>   	long sum;
>>   	struct val_t __percpu_kptr *pc;
>> @@ -161,4 +165,18 @@ int BPF_PROG(test_array_map_7)
>>   	return 0;
>>   }
>>   
>> +SEC("?fentry.s/bpf_fentry_test1")
>> +__failure __msg("bpf_percpu_obj_new type size (600) is greater than 512")
>> +int BPF_PROG(test_array_map_8)
>> +{
>> +	struct val_600b_t __percpu_kptr *p;
>> +
>> +	p = bpf_percpu_obj_new(struct val_600b_t);
>> +	if (!p)
>> +		return 0;
>> +
>> +	bpf_percpu_obj_drop(p);
>> +	return 0;
>> +}
>> +
>>   char _license[] SEC("license") = "GPL";
>> diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
>> index b685a4aba6bd..68cba55eb828 100644
>> --- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
>> +++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
>> @@ -188,9 +188,6 @@ DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
>>   DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
>>   DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
>>   DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
>> -DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
>> -DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
>> -DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
> Considering the update in patch "bpf: Avoid unnecessary extra percpu
> memory allocation", the definition of DEFINE_ARRAY_WITH_PERCPU_KPTR()
> needs update as well, because for 512-sized per-cpu kptr, the tests only
> allocate for (512 - sizeof(void *)) bytes. And we could do
> DEFINE_ARRAY_WITH_PERCPU_KPTR(8) test after the update. I could do that
> after the patch-set is landed if you don't have time to do that.
>
> A bit of off-topic, but it is still relevant. I have a question about
> how to forcibly generate BTF info for struct definition in the test ?
> Currently, I have to include  bin_data_xx in the definition of
> map_value, but I don't want to increase the size of map_value. I had
> tried to use BTF_TYPE_EMIT() in prog just like in linux kernel, but it
> didn't work.

Since you mentioned the btf generation issue, I did some investigation.
To workaround btf generation issue, we can use the method in
prog_tests/local_kptr_stash.c:

====
/* This is necessary so that LLVM generates BTF for node_data struct
  * If it's not included, a fwd reference for node_data will be generated but
  * no struct. Example BTF of "node" field in map_value when not included:
  *
  * [10] PTR '(anon)' type_id=35
  * [34] FWD 'node_data' fwd_kind=struct
  * [35] TYPE_TAG 'kptr_ref' type_id=34
  *
  * (with no node_data struct defined)
  * Had to do the same w/ bpf_kfunc_call_test_release below
  */
struct node_data *just_here_because_btf_bug;
struct refcounted_node *just_here_because_btf_bug2;
====

I have hacked the test_bpf_ma.c files and something like below
should work to generate btf types:

         struct bin_data_##_size { \
                 char data[_size - sizeof(void *)]; \
         }; \
+       /* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */    \
+       struct bin_data_##_size *__bin_data_##_size; \
         struct map_value_##_size { \
                 struct bin_data_##_size __kptr * data; \
-               /* To emit BTF info for bin_data_xx */ \
-               struct bin_data_##_size not_used; \
         }; \
         struct { \
                 __uint(type, BPF_MAP_TYPE_ARRAY); \
@@ -40,8 +43,12 @@ int pid = 0;
         } array_##_size SEC(".maps")
  
  #define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+       struct percpu_bin_data_##_size { \
+               char data[_size]; \
+       }; \
+       struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
         struct map_value_percpu_##_size { \
-               struct bin_data_##_size __percpu_kptr * data; \
+               struct percpu_bin_data_##_size __percpu_kptr * data; \
         }; \
         struct { \
                 __uint(type, BPF_MAP_TYPE_ARRAY); \

I have a prototype to ensure the type (for percpu kptr) removing these
'- sizeof(void *)' and enabling DEFINE_ARRAY_WITH_PERCPU_KPTR().
Once we resolve the check_obj_size() issue, I can then post v3.

>>   
>>   SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
>>   int test_batch_alloc_free(void *ctx)
>> @@ -259,9 +256,6 @@ int test_batch_percpu_alloc_free(void *ctx)
>>   	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
>>   	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
>>   	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
>> -	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
>> -	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
>> -	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
>>   
>>   	return 0;
>>   }
>> @@ -283,9 +277,6 @@ int test_percpu_free_through_map_free(void *ctx)
>>   	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
>>   	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
>>   	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
>> -	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
>> -	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
>> -	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
>>   
>>   	return 0;
>>   }
Hou Tao Dec. 15, 2023, 7:51 a.m. UTC | #3
Hi,

On 12/15/2023 3:38 PM, Yonghong Song wrote:
>
> On 12/14/23 7:33 PM, Hou Tao wrote:
>> Hi,
>>
>> On 12/15/2023 8:12 AM, Yonghong Song wrote:
>>> In the previous patch, the maximum data size for bpf_global_percpu_ma
>>> is 512 bytes. This breaks selftest test_bpf_ma. Let us adjust it
>>> accordingly. Also added a selftest to capture the verification failure
>>> when the allocation size is greater than 512.
>>>
>>> Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
>>>

SNIP
>>>   DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
>>>   DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
>>>   DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
>>> -DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
>>> -DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
>>> -DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
>> Considering the update in patch "bpf: Avoid unnecessary extra percpu
>> memory allocation", the definition of DEFINE_ARRAY_WITH_PERCPU_KPTR()
>> needs update as well, because for 512-sized per-cpu kptr, the tests only
>> allocate for (512 - sizeof(void *)) bytes. And we could do
>> DEFINE_ARRAY_WITH_PERCPU_KPTR(8) test after the update. I could do that
>> after the patch-set is landed if you don't have time to do that.
>>
>> A bit of off-topic, but it is still relevant. I have a question about
>> how to forcibly generate BTF info for struct definition in the test?
>> Currently, I have to include  bin_data_xx in the definition of
>> map_value, but I don't want to increase the size of map_value. I had
>> tried to use BTF_TYPE_EMIT() in prog just like in linux kernel, but it
>> didn't work.
>
> Since you mentioned the btf generation issue, I did some investigation.
> To workaround btf generation issue, we can use the method in
> prog_tests/local_kptr_stash.c:
>
> ====
> /* This is necessary so that LLVM generates BTF for node_data struct
>  * If it's not included, a fwd reference for node_data will be
> generated but
>  * no struct. Example BTF of "node" field in map_value when not included:
>  *
>  * [10] PTR '(anon)' type_id=35
>  * [34] FWD 'node_data' fwd_kind=struct
>  * [35] TYPE_TAG 'kptr_ref' type_id=34
>  *
>  * (with no node_data struct defined)
>  * Had to do the same w/ bpf_kfunc_call_test_release below
>  */
> struct node_data *just_here_because_btf_bug;
> struct refcounted_node *just_here_because_btf_bug2;
> ====

Totally missed it. Thanks for pointing it out to me.
>
> I have hacked the test_bpf_ma.c files and something like below
> should work to generate btf types:
>
>         struct bin_data_##_size { \
>                 char data[_size - sizeof(void *)]; \
>         }; \
> +       /* See Commit 5d8d6634ccc, force btf generation for type
> bin_data_##_size */    \
> +       struct bin_data_##_size *__bin_data_##_size; \
>         struct map_value_##_size { \
>                 struct bin_data_##_size __kptr * data; \
> -               /* To emit BTF info for bin_data_xx */ \
> -               struct bin_data_##_size not_used; \
>         }; \
>         struct { \
>                 __uint(type, BPF_MAP_TYPE_ARRAY); \
> @@ -40,8 +43,12 @@ int pid = 0;
>         } array_##_size SEC(".maps")
>  
>  #define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
> +       struct percpu_bin_data_##_size { \
> +               char data[_size]; \
> +       }; \
> +       struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
>         struct map_value_percpu_##_size { \
> -               struct bin_data_##_size __percpu_kptr * data; \
> +               struct percpu_bin_data_##_size __percpu_kptr * data; \
>         }; \
>         struct { \
>                 __uint(type, BPF_MAP_TYPE_ARRAY); \
>
> I have a prototype to ensure the type (for percpu kptr) removing these
> '- sizeof(void *)' and enabling DEFINE_ARRAY_WITH_PERCPU_KPTR().
> Once we resolve the check_obj_size() issue, I can then post v3.

Thanks for the update and it looks fine to me. Looking forward to v3.
>
>>>     SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
>>>   int test_batch_alloc_free(void *ctx)
>>> @@ -259,9 +256,6 @@ int test_batch_percpu_alloc_free(void *ctx)
>>>       CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
>>>       CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
>>>       CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
>>> -    CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
>>> -    CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
>>> -    CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
>>>         return 0;
>>>   }
>>> @@ -283,9 +277,6 @@ int test_percpu_free_through_map_free(void *ctx)
>>>       CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
>>>       CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
>>>       CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
>>> -    CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
>>> -    CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
>>> -    CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
>>>         return 0;
>>>   }
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
index 1a891d30f1fe..f2b8eb2ff76f 100644
--- a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -17,6 +17,10 @@  struct val_with_rb_root_t {
 	struct bpf_spin_lock lock;
 };
 
+struct val_600b_t {
+	char b[600];
+};
+
 struct elem {
 	long sum;
 	struct val_t __percpu_kptr *pc;
@@ -161,4 +165,18 @@  int BPF_PROG(test_array_map_7)
 	return 0;
 }
 
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type size (600) is greater than 512")
+int BPF_PROG(test_array_map_8)
+{
+	struct val_600b_t __percpu_kptr *p;
+
+	p = bpf_percpu_obj_new(struct val_600b_t);
+	if (!p)
+		return 0;
+
+	bpf_percpu_obj_drop(p);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
index b685a4aba6bd..68cba55eb828 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -188,9 +188,6 @@  DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
 
 SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
 int test_batch_alloc_free(void *ctx)
@@ -259,9 +256,6 @@  int test_batch_percpu_alloc_free(void *ctx)
 	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
 	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
 	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
-	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
-	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
-	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
 
 	return 0;
 }
@@ -283,9 +277,6 @@  int test_percpu_free_through_map_free(void *ctx)
 	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
 	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
 	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
-	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
-	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
-	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
 
 	return 0;
 }