Message ID | 20230327170126.406044-2-urezki@gmail.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | [v3,1/2] mm: vmalloc: Remove a global vmap_blocks xarray | expand |
On Mon, Mar 27, 2023 at 07:01:26PM +0200, Uladzislau Rezki (Sony) wrote: > Add vm_map_ram()/vm_unmap_ram() test case to our stress test-suite. > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> > --- > lib/test_vmalloc.c | 41 +++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 41 insertions(+) > > diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c > index cd2bdba6d3ed..6633eda4cd4d 100644 > --- a/lib/test_vmalloc.c > +++ b/lib/test_vmalloc.c > @@ -53,6 +53,7 @@ __param(int, run_test_mask, INT_MAX, > "\t\tid: 128, name: pcpu_alloc_test\n" > "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" > "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" > + "\t\tid: 1024, name: vm_map_ram_test\n" > /* Add a new test case description here. */ > ); > > @@ -358,6 +359,45 @@ kvfree_rcu_2_arg_vmalloc_test(void) > return 0; > } > > +static int > +vm_map_ram_test(void) > +{ > + unsigned int map_nr_pages; > + unsigned char *v_ptr; > + unsigned char *p_ptr; > + struct page **pages; > + struct page *page; > + int i; > + > + map_nr_pages = nr_pages > 0 ? nr_pages:1; > + pages = kmalloc(map_nr_pages * sizeof(*page), GFP_KERNEL); > + if (!pages) > + return -1; > + > + for (i = 0; i < map_nr_pages; i++) { > + page = alloc_pages(GFP_KERNEL, 1); Pedantry, but given I literally patched this pedantically the other day, this could be alloc_page(GFP_KERNEL) :) > + if (!page) > + return -1; We're leaking memory here right? Should jump to cleanup below. > + > + pages[i] = page; > + } You should be able to replace this with something like:- unsigned long nr_allocated; ... nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages); if (nr_allocated != map_nr_pages) goto cleanup; > + > + /* Run the test loop.
*/ > + for (i = 0; i < test_loop_count; i++) { > + v_ptr = vm_map_ram(pages, map_nr_pages, -1); NIT: The -1 would be clearer as NUMA_NO_NODE > + *v_ptr = 'a'; > + vm_unmap_ram(v_ptr, map_nr_pages); > + } > + Reference to the above you'd add the cleanup label here:- cleanup: > + for (i = 0; i < map_nr_pages; i++) { > + p_ptr = page_address(pages[i]); > + free_pages((unsigned long)p_ptr, 1); Nit, can be free_page((unsigned long)p_ptr); > + } > + > + kfree(pages); > + return 0; > +} > + > struct test_case_desc { > const char *test_name; > int (*test_func)(void); > @@ -374,6 +414,7 @@ static struct test_case_desc test_case_array[] = { > { "pcpu_alloc_test", pcpu_alloc_test }, > { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, > { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, > + { "vm_map_ram_test", vm_map_ram_test }, > /* Add a new test case here. */ > }; > > -- > 2.30.2 >
> On Mon, Mar 27, 2023 at 07:01:26PM +0200, Uladzislau Rezki (Sony) wrote: > > Add vm_map_ram()/vm_unmap_ram() test case to our stress test-suite. > > > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> > > --- > > lib/test_vmalloc.c | 41 +++++++++++++++++++++++++++++++++++++++++ > > 1 file changed, 41 insertions(+) > > > > diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c > > index cd2bdba6d3ed..6633eda4cd4d 100644 > > --- a/lib/test_vmalloc.c > > +++ b/lib/test_vmalloc.c > > @@ -53,6 +53,7 @@ __param(int, run_test_mask, INT_MAX, > > "\t\tid: 128, name: pcpu_alloc_test\n" > > "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" > > "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" > > + "\t\tid: 1024, name: vm_map_ram_test\n" > > /* Add a new test case description here. */ > > ); > > > > @@ -358,6 +359,45 @@ kvfree_rcu_2_arg_vmalloc_test(void) > > return 0; > > } > > > > +static int > > +vm_map_ram_test(void) > > +{ > > + unsigned int map_nr_pages; > > + unsigned char *v_ptr; > > + unsigned char *p_ptr; > > + struct page **pages; > > + struct page *page; > > + int i; > > + > > + map_nr_pages = nr_pages > 0 ? nr_pages:1; > > + pages = kmalloc(map_nr_pages * sizeof(*page), GFP_KERNEL); > > + if (!pages) > > + return -1; > > + > > + for (i = 0; i < map_nr_pages; i++) { > > + page = alloc_pages(GFP_KERNEL, 1); > > Pedantry, but given I literally patched this pedantically the other day, > this could be alloc_page(GFP_KERNEL) :) > > > + if (!page) > > + return -1; > > We're leaking memory here right? Should jump to cleanup below. > > > + > > + pages[i] = page; > > + } > > > You should be able to replace this with something like:- > > unsigned long nr_allocated; > > ... > > nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages); > if (nr_allocated != map_nr_pages) > goto cleanup; > > > + > > + /* Run the test loop.
*/ > > + for (i = 0; i < test_loop_count; i++) { > > + v_ptr = vm_map_ram(pages, map_nr_pages, -1); > > NIT: The -1 would be clearer as NUMA_NO_NODE > > > + *v_ptr = 'a'; > > + vm_unmap_ram(v_ptr, map_nr_pages); > > + } > > + > > Reference to the above you'd add the cleanup label here:- > > cleanup: > > > + for (i = 0; i < map_nr_pages; i++) { > > + p_ptr = page_address(pages[i]); > > + free_pages((unsigned long)p_ptr, 1); > > Nit, can be free_page((unsigned long)p_ptr); > Thank you. Will fix all comments, especially switching to the alloc_page() new API :) -- Uladzislau Rezki
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index cd2bdba6d3ed..6633eda4cd4d 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -53,6 +53,7 @@ __param(int, run_test_mask, INT_MAX, "\t\tid: 128, name: pcpu_alloc_test\n" "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" + "\t\tid: 1024, name: vm_map_ram_test\n" /* Add a new test case description here. */ ); @@ -358,6 +359,45 @@ kvfree_rcu_2_arg_vmalloc_test(void) return 0; } +static int +vm_map_ram_test(void) +{ + unsigned int map_nr_pages; + unsigned char *v_ptr; + unsigned char *p_ptr; + struct page **pages; + struct page *page; + int i; + + map_nr_pages = nr_pages > 0 ? nr_pages:1; + pages = kmalloc(map_nr_pages * sizeof(*page), GFP_KERNEL); + if (!pages) + return -1; + + for (i = 0; i < map_nr_pages; i++) { + page = alloc_pages(GFP_KERNEL, 1); + if (!page) + return -1; + + pages[i] = page; + } + + /* Run the test loop. */ + for (i = 0; i < test_loop_count; i++) { + v_ptr = vm_map_ram(pages, map_nr_pages, -1); + *v_ptr = 'a'; + vm_unmap_ram(v_ptr, map_nr_pages); + } + + for (i = 0; i < map_nr_pages; i++) { + p_ptr = page_address(pages[i]); + free_pages((unsigned long)p_ptr, 1); + } + + kfree(pages); + return 0; +} + struct test_case_desc { const char *test_name; int (*test_func)(void); @@ -374,6 +414,7 @@ static struct test_case_desc test_case_array[] = { { "pcpu_alloc_test", pcpu_alloc_test }, { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, + { "vm_map_ram_test", vm_map_ram_test }, /* Add a new test case here. */ };
Add vm_map_ram()/vm_unmap_ram() test case to our stress test-suite. Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> --- lib/test_vmalloc.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+)