[v4,2/2] lib/test_vmalloc.c: Add vm_map_ram()/vm_unmap_ram() test case

Message ID 20230330190639.431589-2-urezki@gmail.com (mailing list archive)
State New
Series [v4,1/2] mm: vmalloc: Remove a global vmap_blocks xarray

Commit Message

Uladzislau Rezki March 30, 2023, 7:06 p.m. UTC
Add vm_map_ram()/vm_unmap_ram() test case to our stress test-suite.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 lib/test_vmalloc.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
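
To run just the new case once the module is available, it can be selected with the id listed in the run_test_mask help text, e.g. something like:

	modprobe test_vmalloc run_test_mask=1024

(nr_pages, which sizes the mapping in the new test, and test_loop_count can be passed the same way, assuming they are exposed as module parameters like run_test_mask.)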

Comments

Lorenzo Stoakes March 30, 2023, 7:36 p.m. UTC | #1
On Thu, Mar 30, 2023 at 09:06:39PM +0200, Uladzislau Rezki (Sony) wrote:
> Add vm_map_ram()/vm_unmap_ram() test case to our stress test-suite.
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  lib/test_vmalloc.c | 37 +++++++++++++++++++++++++++++++++++++
>  1 file changed, 37 insertions(+)
>
> diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
> index cd2bdba6d3ed..1622f3f9ec23 100644
> --- a/lib/test_vmalloc.c
> +++ b/lib/test_vmalloc.c
> @@ -53,6 +53,7 @@ __param(int, run_test_mask, INT_MAX,
>  		"\t\tid: 128,  name: pcpu_alloc_test\n"
>  		"\t\tid: 256,  name: kvfree_rcu_1_arg_vmalloc_test\n"
>  		"\t\tid: 512,  name: kvfree_rcu_2_arg_vmalloc_test\n"
> +		"\t\tid: 1024, name: vm_map_ram_test\n"
>  		/* Add a new test case description here. */
>  );
>
> @@ -358,6 +359,41 @@ kvfree_rcu_2_arg_vmalloc_test(void)
>  	return 0;
>  }
>
> +static int
> +vm_map_ram_test(void)
> +{
> +	unsigned long nr_allocated;
> +	unsigned int map_nr_pages;
> +	unsigned char *v_ptr;
> +	struct page **pages;
> +	int i;
> +
> +	map_nr_pages = nr_pages > 0 ? nr_pages:1;
> +	pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL);
> +	if (!pages)
> +		return -1;
> +
> +	nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
> +	if (nr_allocated != map_nr_pages)
> +        goto cleanup;

Nitty, but there's a whitespace error here, need to convert to tabs :)
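
i.e. with a tab the two lines would read:

	if (nr_allocated != map_nr_pages)
		goto cleanup;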

> +
> +	/* Run the test loop. */
> +	for (i = 0; i < test_loop_count; i++) {
> +		v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);
> +		*v_ptr = 'a';
> +		vm_unmap_ram(v_ptr, map_nr_pages);
> +	}
> +
> +cleanup:
> +	for (i = 0; i < nr_allocated; i++)
> +		__free_page(pages[i]);
> +
> +	kfree(pages);
> +
> +	/* 0 indicates success. */
> +	return nr_allocated != map_nr_pages;
> +}
> +
>  struct test_case_desc {
>  	const char *test_name;
>  	int (*test_func)(void);
> @@ -374,6 +410,7 @@ static struct test_case_desc test_case_array[] = {
>  	{ "pcpu_alloc_test", pcpu_alloc_test },
>  	{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
>  	{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
> +	{ "vm_map_ram_test", vm_map_ram_test },
>  	/* Add a new test case here. */
>  };
>
> --
> 2.30.2
>

Other than the nit,

Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Baoquan He March 30, 2023, 11:59 p.m. UTC | #2
On 03/30/23 at 09:06pm, Uladzislau Rezki (Sony) wrote:
> Add vm_map_ram()/vm_unmap_ram() test case to our stress test-suite.
> 
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  lib/test_vmalloc.c | 37 +++++++++++++++++++++++++++++++++++++
>  1 file changed, 37 insertions(+)
> 
> diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
> index cd2bdba6d3ed..1622f3f9ec23 100644
> --- a/lib/test_vmalloc.c
> +++ b/lib/test_vmalloc.c
> @@ -53,6 +53,7 @@ __param(int, run_test_mask, INT_MAX,
>  		"\t\tid: 128,  name: pcpu_alloc_test\n"
>  		"\t\tid: 256,  name: kvfree_rcu_1_arg_vmalloc_test\n"
>  		"\t\tid: 512,  name: kvfree_rcu_2_arg_vmalloc_test\n"
> +		"\t\tid: 1024, name: vm_map_ram_test\n"
>  		/* Add a new test case description here. */
>  );
>  
> @@ -358,6 +359,41 @@ kvfree_rcu_2_arg_vmalloc_test(void)
>  	return 0;
>  }
>  
> +static int
> +vm_map_ram_test(void)
> +{
> +	unsigned long nr_allocated;
> +	unsigned int map_nr_pages;
> +	unsigned char *v_ptr;
> +	struct page **pages;
> +	int i;
> +
> +	map_nr_pages = nr_pages > 0 ? nr_pages:1;
> +	pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL);
> +	if (!pages)
> +		return -1;
> +
> +	nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
> +	if (nr_allocated != map_nr_pages)
> +        goto cleanup;
> +
> +	/* Run the test loop. */
> +	for (i = 0; i < test_loop_count; i++) {
> +		v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);
> +		*v_ptr = 'a';
> +		vm_unmap_ram(v_ptr, map_nr_pages);
> +	}
> +
> +cleanup:
> +	for (i = 0; i < nr_allocated; i++)
> +		__free_page(pages[i]);
> +
> +	kfree(pages);
> +
> +	/* 0 indicates success. */
> +	return nr_allocated != map_nr_pages;
> +}
> +
>  struct test_case_desc {
>  	const char *test_name;
>  	int (*test_func)(void);
> @@ -374,6 +410,7 @@ static struct test_case_desc test_case_array[] = {
>  	{ "pcpu_alloc_test", pcpu_alloc_test },
>  	{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
>  	{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
> +	{ "vm_map_ram_test", vm_map_ram_test },
>  	/* Add a new test case here. */
>  };

Reviewed-by: Baoquan He <bhe@redhat.com>

Patch

diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index cd2bdba6d3ed..1622f3f9ec23 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -53,6 +53,7 @@  __param(int, run_test_mask, INT_MAX,
 		"\t\tid: 128,  name: pcpu_alloc_test\n"
 		"\t\tid: 256,  name: kvfree_rcu_1_arg_vmalloc_test\n"
 		"\t\tid: 512,  name: kvfree_rcu_2_arg_vmalloc_test\n"
+		"\t\tid: 1024, name: vm_map_ram_test\n"
 		/* Add a new test case description here. */
 );
 
@@ -358,6 +359,41 @@  kvfree_rcu_2_arg_vmalloc_test(void)
 	return 0;
 }
 
+static int
+vm_map_ram_test(void)
+{
+	unsigned long nr_allocated;
+	unsigned int map_nr_pages;
+	unsigned char *v_ptr;
+	struct page **pages;
+	int i;
+
+	map_nr_pages = nr_pages > 0 ? nr_pages:1;
+	pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL);
+	if (!pages)
+		return -1;
+
+	nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+	if (nr_allocated != map_nr_pages)
+        goto cleanup;
+
+	/* Run the test loop. */
+	for (i = 0; i < test_loop_count; i++) {
+		v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);
+		*v_ptr = 'a';
+		vm_unmap_ram(v_ptr, map_nr_pages);
+	}
+
+cleanup:
+	for (i = 0; i < nr_allocated; i++)
+		__free_page(pages[i]);
+
+	kfree(pages);
+
+	/* 0 indicates success. */
+	return nr_allocated != map_nr_pages;
+}
+
 struct test_case_desc {
 	const char *test_name;
 	int (*test_func)(void);
@@ -374,6 +410,7 @@  static struct test_case_desc test_case_array[] = {
 	{ "pcpu_alloc_test", pcpu_alloc_test },
 	{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
 	{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
+	{ "vm_map_ram_test", vm_map_ram_test },
 	/* Add a new test case here. */
 };
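
For context, not part of the patch: the ids in the run_test_mask help text double with each entry, which suggests each test is selected by the bit matching its index in test_case_array, so the new entry at index 10 gets id 1 << 10 = 1024. A minimal sketch of such a selection loop (the function name here is hypothetical):

	static void run_selected_tests(void)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
			/* Skip tests whose bit is not set in the mask. */
			if (!(run_test_mask & (1 << i)))
				continue;

			/* ... invoke test_case_array[i].test_func() ... */
		}
	}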