
[2/5] mm/vmalloc: Extend __alloc_vmap_area() with extra arguments

Message ID: 20220607093449.3100-3-urezki@gmail.com (mailing list archive)
State: New
Series: Reduce a vmalloc internal lock contention preparation work

Commit Message

Uladzislau Rezki June 7, 2022, 9:34 a.m. UTC
Currently __alloc_vmap_area() allocates only from the global vmap
space, therefore the list head and rb-tree that represent the free
vmap space are not passed as parameters to this function but are
accessed directly from within it.

Extend __alloc_vmap_area() and the functions it depends on so that
they can allocate from different trees, making the interface generic
instead of tied to the global data.

There is no functional change as a result of this patch.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)
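
To illustrate the direction this change enables, here is a minimal
sketch (not part of the patch; the per-CPU pool structure and the
alloc_from_pool() helper are hypothetical) of a caller that passes its
own rb_root/list_head to the extended __alloc_vmap_area():

	/*
	 * Hypothetical per-CPU free-space pool with its own tree/list.
	 * The extended allocator can operate on it instead of the global
	 * free_vmap_area_root/free_vmap_area_list.
	 */
	struct cpu_vmap_pool {
		spinlock_t lock;
		struct rb_root free_root;
		struct list_head free_head;
	};

	static unsigned long
	alloc_from_pool(struct cpu_vmap_pool *p, unsigned long size,
		unsigned long align, unsigned long vstart, unsigned long vend)
	{
		unsigned long addr;

		spin_lock(&p->lock);
		addr = __alloc_vmap_area(&p->free_root, &p->free_head,
				size, align, vstart, vend);
		spin_unlock(&p->lock);

		/* As with the global path, vend indicates failure. */
		return addr;
	}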

Comments

Christoph Hellwig June 7, 2022, 9:49 a.m. UTC | #1
On Tue, Jun 07, 2022 at 11:34:46AM +0200, Uladzislau Rezki (Sony) wrote:
> It implies that __alloc_vmap_area() allocates only from the
> global vmap space, therefore a list-head and rb-tree, which
> represent a free vmap space, are not passed as parameters to
> this function and are accessed directly from this function.

Yes, which totally makes sense.

> Extend the __alloc_vmap_area() and other dependent functions
> to have a possibility to allocate from different trees making
> an interface common and not specific.

Which seems completely pointless.  Why add arguments that are always
passed the same values?
Uladzislau Rezki June 7, 2022, 1:02 p.m. UTC | #2
>
> On Tue, Jun 07, 2022 at 11:34:46AM +0200, Uladzislau Rezki (Sony) wrote:
> > It implies that __alloc_vmap_area() allocates only from the
> > global vmap space, therefore a list-head and rb-tree, which
> > represent a free vmap space, are not passed as parameters to
> > this function and are accessed directly from this function.
>
> Yes, which totally makes sense.
>
> > Extend the __alloc_vmap_area() and other dependent functions
> > to have a possibility to allocate from different trees making
> > an interface common and not specific.
>
> Which seems completely pointless.  Why add arguments that are always
> passed the same values?
>
I wrote about it in the cover letter. It is a preparation work for
making vmalloc per-cpu. In that case free/busy data are located on
different rb_roots, which is why those functions have to be adapted
to work with any tree.
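
To make that concrete, a hypothetical per-CPU layout (the names below
are illustrative only, not taken from this series) could keep separate
free and busy trees, each of which the reworked helpers receive
explicitly:

	/* Hypothetical per-CPU bookkeeping; illustrative names only. */
	struct vmap_cpu_zone {
		spinlock_t lock;
		struct rb_root free_root;	/* free vmap space of this CPU */
		struct list_head free_head;
		struct rb_root busy_root;	/* allocated areas of this CPU */
		struct list_head busy_head;
	};

	/*
	 * The reworked helpers then take the tree/list explicitly, e.g.:
	 *
	 *   adjust_va_to_fit_type(&zone->free_root, &zone->free_head,
	 *                         va, nva_start_addr, size, type);
	 */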
Baoquan He June 8, 2022, 3:46 a.m. UTC | #3
On 06/07/22 at 11:34am, Uladzislau Rezki (Sony) wrote:
> It implies that __alloc_vmap_area() allocates only from the
> global vmap space, therefore a list-head and rb-tree, which
> represent a free vmap space, are not passed as parameters to
> this function and are accessed directly from this function.
> 
> Extend the __alloc_vmap_area() and other dependent functions
> to have a possibility to allocate from different trees making
> an interface common and not specific.
> 
> There is no functional change as a result of this patch.

LGTM,

Reviewed-by: Baoquan He <bhe@redhat.com>

> 
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 30 ++++++++++++++++--------------
>  1 file changed, 16 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 0102d6d5fcdf..745e89eb6ca1 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1234,15 +1234,15 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
>   * overhead.
>   */
>  static __always_inline struct vmap_area *
> -find_vmap_lowest_match(unsigned long size, unsigned long align,
> -	unsigned long vstart, bool adjust_search_size)
> +find_vmap_lowest_match(struct rb_root *root, unsigned long size,
> +	unsigned long align, unsigned long vstart, bool adjust_search_size)
>  {
>  	struct vmap_area *va;
>  	struct rb_node *node;
>  	unsigned long length;
>  
>  	/* Start from the root. */
> -	node = free_vmap_area_root.rb_node;
> +	node = root->rb_node;
>  
>  	/* Adjust the search size for alignment overhead. */
>  	length = adjust_search_size ? size + align - 1 : size;
> @@ -1370,9 +1370,9 @@ classify_va_fit_type(struct vmap_area *va,
>  }
>  
>  static __always_inline int
> -adjust_va_to_fit_type(struct vmap_area *va,
> -	unsigned long nva_start_addr, unsigned long size,
> -	enum fit_type type)
> +adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> +	struct vmap_area *va, unsigned long nva_start_addr,
> +	unsigned long size, enum fit_type type)
>  {
>  	struct vmap_area *lva = NULL;
>  
> @@ -1384,7 +1384,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
>  		 * V      NVA      V
>  		 * |---------------|
>  		 */
> -		unlink_va_augment(va, &free_vmap_area_root);
> +		unlink_va_augment(va, root);
>  		kmem_cache_free(vmap_area_cachep, va);
>  	} else if (type == LE_FIT_TYPE) {
>  		/*
> @@ -1462,8 +1462,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
>  		augment_tree_propagate_from(va);
>  
>  		if (lva)	/* type == NE_FIT_TYPE */
> -			insert_vmap_area_augment(lva, &va->rb_node,
> -				&free_vmap_area_root, &free_vmap_area_list);
> +			insert_vmap_area_augment(lva, &va->rb_node, root, head);
>  	}
>  
>  	return 0;
> @@ -1474,7 +1473,8 @@ adjust_va_to_fit_type(struct vmap_area *va,
>   * Otherwise a vend is returned that indicates failure.
>   */
>  static __always_inline unsigned long
> -__alloc_vmap_area(unsigned long size, unsigned long align,
> +__alloc_vmap_area(struct rb_root *root, struct list_head *head,
> +	unsigned long size, unsigned long align,
>  	unsigned long vstart, unsigned long vend)
>  {
>  	bool adjust_search_size = true;
> @@ -1495,7 +1495,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
>  	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
>  		adjust_search_size = false;
>  
> -	va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
> +	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
>  	if (unlikely(!va))
>  		return vend;
>  
> @@ -1514,7 +1514,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
>  		return vend;
>  
>  	/* Update the free vmap_area. */
> -	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
> +	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size, type);
>  	if (ret)
>  		return vend;
>  
> @@ -1605,7 +1605,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  
>  retry:
>  	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
> -	addr = __alloc_vmap_area(size, align, vstart, vend);
> +	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> +		size, align, vstart, vend);
>  	spin_unlock(&free_vmap_area_lock);
>  
>  	/*
> @@ -3886,7 +3887,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>  			/* It is a BUG(), but trigger recovery instead. */
>  			goto recovery;
>  
> -		ret = adjust_va_to_fit_type(va, start, size, type);
> +		ret = adjust_va_to_fit_type(&free_vmap_area_root,
> +				&free_vmap_area_list, va, start, size, type);
>  		if (unlikely(ret))
>  			goto recovery;
>  
> -- 
> 2.30.2
> 
>
Christoph Hellwig June 10, 2022, 8:18 a.m. UTC | #4
On Tue, Jun 07, 2022 at 03:02:42PM +0200, Uladzislau Rezki wrote:
> I wrote about it in the cover letter. It is a preparation work for
> making vmalloc per-cpu.

Then:

 a) state this in the actual patch commit log.  Those need to be
    standalone and sufficiently describe what the patch is doing.
 b) do that in the actual series that makes use of them.  In fact
    for these kinds of changes the prep patch really seems like a bad
    idea to start with and it would be much easier to follow if the
    changes were done in the main patch.

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0102d6d5fcdf..745e89eb6ca1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1234,15 +1234,15 @@  is_within_this_va(struct vmap_area *va, unsigned long size,
  * overhead.
  */
 static __always_inline struct vmap_area *
-find_vmap_lowest_match(unsigned long size, unsigned long align,
-	unsigned long vstart, bool adjust_search_size)
+find_vmap_lowest_match(struct rb_root *root, unsigned long size,
+	unsigned long align, unsigned long vstart, bool adjust_search_size)
 {
 	struct vmap_area *va;
 	struct rb_node *node;
 	unsigned long length;
 
 	/* Start from the root. */
-	node = free_vmap_area_root.rb_node;
+	node = root->rb_node;
 
 	/* Adjust the search size for alignment overhead. */
 	length = adjust_search_size ? size + align - 1 : size;
@@ -1370,9 +1370,9 @@  classify_va_fit_type(struct vmap_area *va,
 }
 
 static __always_inline int
-adjust_va_to_fit_type(struct vmap_area *va,
-	unsigned long nva_start_addr, unsigned long size,
-	enum fit_type type)
+adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
+	struct vmap_area *va, unsigned long nva_start_addr,
+	unsigned long size, enum fit_type type)
 {
 	struct vmap_area *lva = NULL;
 
@@ -1384,7 +1384,7 @@  adjust_va_to_fit_type(struct vmap_area *va,
 		 * V      NVA      V
 		 * |---------------|
 		 */
-		unlink_va_augment(va, &free_vmap_area_root);
+		unlink_va_augment(va, root);
 		kmem_cache_free(vmap_area_cachep, va);
 	} else if (type == LE_FIT_TYPE) {
 		/*
@@ -1462,8 +1462,7 @@  adjust_va_to_fit_type(struct vmap_area *va,
 		augment_tree_propagate_from(va);
 
 		if (lva)	/* type == NE_FIT_TYPE */
-			insert_vmap_area_augment(lva, &va->rb_node,
-				&free_vmap_area_root, &free_vmap_area_list);
+			insert_vmap_area_augment(lva, &va->rb_node, root, head);
 	}
 
 	return 0;
@@ -1474,7 +1473,8 @@  adjust_va_to_fit_type(struct vmap_area *va,
  * Otherwise a vend is returned that indicates failure.
  */
 static __always_inline unsigned long
-__alloc_vmap_area(unsigned long size, unsigned long align,
+__alloc_vmap_area(struct rb_root *root, struct list_head *head,
+	unsigned long size, unsigned long align,
 	unsigned long vstart, unsigned long vend)
 {
 	bool adjust_search_size = true;
@@ -1495,7 +1495,7 @@  __alloc_vmap_area(unsigned long size, unsigned long align,
 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
 		adjust_search_size = false;
 
-	va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
+	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
 	if (unlikely(!va))
 		return vend;
 
@@ -1514,7 +1514,7 @@  __alloc_vmap_area(unsigned long size, unsigned long align,
 		return vend;
 
 	/* Update the free vmap_area. */
-	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
+	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size, type);
 	if (ret)
 		return vend;
 
@@ -1605,7 +1605,8 @@  static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 retry:
 	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
-	addr = __alloc_vmap_area(size, align, vstart, vend);
+	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
+		size, align, vstart, vend);
 	spin_unlock(&free_vmap_area_lock);
 
 	/*
@@ -3886,7 +3887,8 @@  struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		ret = adjust_va_to_fit_type(va, start, size, type);
+		ret = adjust_va_to_fit_type(&free_vmap_area_root,
+				&free_vmap_area_list, va, start, size, type);
 		if (unlikely(ret))
 			goto recovery;