
[V2,2/6] mm/vmalloc: Avoid iterating over per CPU vmap blocks twice

Message ID 20230525124504.633469722@linutronix.de (mailing list archive)
State New
Series mm/vmalloc: Assorted fixes and improvements

Commit Message

Thomas Gleixner May 25, 2023, 12:57 p.m. UTC
_vm_unmap_aliases() walks the per CPU xarrays to find partially unmapped
blocks and then walks the per CPU free lists to purge fragmented blocks.

Arguably that's a waste of CPU cycles and cache lines, as the full xarray
walk already touches every block.

Avoid this double iteration:

  - Split out the code to purge one block and the code to free the local
    purge list into helper functions.

  - Try to purge the fragmented blocks in the xarray walk before looking at
    their dirty space.
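
In condensed form, the resulting walk looks roughly like this (a simplified
sketch of the reworked _vm_unmap_aliases() loop; the real code is in the
diff below):

	mutex_lock(&vmap_purge_lock);
	for_each_possible_cpu(cpu) {
		...
		xa_for_each(&vbq->vmap_blocks, idx, vb) {
			spin_lock(&vb->lock);
			/* Purge a fragmented block within this walk ... */
			if (!purge_fragmented_block(vb, vbq, &purge_list) &&
			    vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
				/* ... otherwise record its dirty range for the flush */
			}
			spin_unlock(&vb->lock);
		}
		...
	}
	free_purged_blocks(&purge_list);
	/* no separate purge_fragmented_blocks_allcpus() pass anymore */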

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V2: Fix coding style issues - Christoph
---
 mm/vmalloc.c |   70 ++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 46 insertions(+), 24 deletions(-)

Comments

Christoph Hellwig May 26, 2023, 7:55 a.m. UTC | #1
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Lorenzo Stoakes May 27, 2023, 5:38 p.m. UTC | #2
On Thu, May 25, 2023 at 02:57:04PM +0200, Thomas Gleixner wrote:
> _vm_unmap_aliases() walks the per CPU xarrays to find partially unmapped
> blocks and then walks the per CPU free lists to purge fragmented blocks.
>
> Arguably that's a waste of CPU cycles and cache lines, as the full xarray
> walk already touches every block.
>
> Avoid this double iteration:
>
>   - Split out the code to purge one block and the code to free the local
>     purge list into helper functions.
>
>   - Try to purge the fragmented blocks in the xarray walk before looking at
>     their dirty space.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> V2: Fix coding style issues - Christoph
> ---
>  mm/vmalloc.c |   70 ++++++++++++++++++++++++++++++++++++++---------------------
>  1 file changed, 46 insertions(+), 24 deletions(-)
>
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2086,39 +2086,54 @@ static void free_vmap_block(struct vmap_
>  	kfree_rcu(vb, rcu_head);
>  }
>
> +static bool purge_fragmented_block(struct vmap_block *vb,
> +		struct vmap_block_queue *vbq, struct list_head *purge_list)
> +{
> +	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
> +	    vb->dirty == VMAP_BBMAP_BITS)
> +		return false;
> +
> +	/* prevent further allocs after releasing lock */
> +	vb->free = 0;
> +	/* prevent purging it again */
> +	vb->dirty = VMAP_BBMAP_BITS;
> +	vb->dirty_min = 0;
> +	vb->dirty_max = VMAP_BBMAP_BITS;
> +	spin_lock(&vbq->lock);
> +	list_del_rcu(&vb->free_list);
> +	spin_unlock(&vbq->lock);
> +	list_add_tail(&vb->purge, purge_list);
> +	return true;
> +}
> +
> +static void free_purged_blocks(struct list_head *purge_list)
> +{
> +	struct vmap_block *vb, *n_vb;
> +
> +	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
> +		list_del(&vb->purge);
> +		free_vmap_block(vb);
> +	}
> +}
> +
>  static void purge_fragmented_blocks(int cpu)
>  {
>  	LIST_HEAD(purge);
>  	struct vmap_block *vb;
> -	struct vmap_block *n_vb;
>  	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
>
>  	rcu_read_lock();
>  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
> -
> -		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
> +		if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
> +		    vb->dirty == VMAP_BBMAP_BITS)
>  			continue;
>
>  		spin_lock(&vb->lock);
> -		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
> -			vb->free = 0; /* prevent further allocs after releasing lock */
> -			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
> -			vb->dirty_min = 0;
> -			vb->dirty_max = VMAP_BBMAP_BITS;
> -			spin_lock(&vbq->lock);
> -			list_del_rcu(&vb->free_list);
> -			spin_unlock(&vbq->lock);
> -			spin_unlock(&vb->lock);
> -			list_add_tail(&vb->purge, &purge);
> -		} else
> -			spin_unlock(&vb->lock);
> +		purge_fragmented_block(vb, vbq, &purge);
> +		spin_unlock(&vb->lock);
>  	}
>  	rcu_read_unlock();
> -
> -	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
> -		list_del(&vb->purge);
> -		free_vmap_block(vb);
> -	}
> +	free_purged_blocks(&purge);
>  }
>
>  static void purge_fragmented_blocks_allcpus(void)
> @@ -2226,12 +2241,13 @@ static void vb_free(unsigned long addr,
>
>  static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
>  {
> +	LIST_HEAD(purge_list);
>  	int cpu;
>
>  	if (unlikely(!vmap_initialized))
>  		return;
>
> -	might_sleep();
> +	mutex_lock(&vmap_purge_lock);
>
>  	for_each_possible_cpu(cpu) {
>  		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
> @@ -2241,7 +2257,14 @@ static void _vm_unmap_aliases(unsigned l
>  		rcu_read_lock();
>  		xa_for_each(&vbq->vmap_blocks, idx, vb) {
>  			spin_lock(&vb->lock);
> -			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
> +
> +			/*
> +			 * Try to purge a fragmented block first. If it's
> +			 * not purgeable, check whether there is dirty
> +			 * space to be flushed.
> +			 */
> +			if (!purge_fragmented_block(vb, vbq, &purge_list) &&
> +			    vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
>  				unsigned long va_start = vb->va->va_start;
>  				unsigned long s, e;
>
> @@ -2257,9 +2280,8 @@ static void _vm_unmap_aliases(unsigned l
>  		}
>  		rcu_read_unlock();
>  	}
> +	free_purged_blocks(&purge_list);
>
> -	mutex_lock(&vmap_purge_lock);
> -	purge_fragmented_blocks_allcpus();

Nice catch... and ugh that we were repeating work here.

>  	if (!__purge_vmap_area_lazy(start, end) && flush)
>  		flush_tlb_kernel_range(start, end);
>  	mutex_unlock(&vmap_purge_lock);
>
>

Overall a nice cleanup; it feels like vmalloc has a lot of dirty, musty
corners. Glad you've broken out the feather duster :)

Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
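
A note on locking, as a reading aid for the diff below: both call sites
invoke the new purge_fragmented_block() helper with vb->lock already held,
and the helper only takes vbq->lock internally to unhook the block from the
per-CPU free list. The call pattern is roughly (a sketch of the shape, not a
literal excerpt from the patch):

	spin_lock(&vb->lock);
	if (!purge_fragmented_block(vb, vbq, &purge_list)) {
		/* not purgeable: fall back to flushing its dirty space */
	}
	spin_unlock(&vb->lock);
	...
	/* once the RCU read-side walk is done */
	free_purged_blocks(&purge_list);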

Patch

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2086,39 +2086,54 @@  static void free_vmap_block(struct vmap_
 	kfree_rcu(vb, rcu_head);
 }
 
+static bool purge_fragmented_block(struct vmap_block *vb,
+		struct vmap_block_queue *vbq, struct list_head *purge_list)
+{
+	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
+	    vb->dirty == VMAP_BBMAP_BITS)
+		return false;
+
+	/* prevent further allocs after releasing lock */
+	vb->free = 0;
+	/* prevent purging it again */
+	vb->dirty = VMAP_BBMAP_BITS;
+	vb->dirty_min = 0;
+	vb->dirty_max = VMAP_BBMAP_BITS;
+	spin_lock(&vbq->lock);
+	list_del_rcu(&vb->free_list);
+	spin_unlock(&vbq->lock);
+	list_add_tail(&vb->purge, purge_list);
+	return true;
+}
+
+static void free_purged_blocks(struct list_head *purge_list)
+{
+	struct vmap_block *vb, *n_vb;
+
+	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
+		list_del(&vb->purge);
+		free_vmap_block(vb);
+	}
+}
+
 static void purge_fragmented_blocks(int cpu)
 {
 	LIST_HEAD(purge);
 	struct vmap_block *vb;
-	struct vmap_block *n_vb;
 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-
-		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
+		if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
+		    vb->dirty == VMAP_BBMAP_BITS)
 			continue;
 
 		spin_lock(&vb->lock);
-		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
-			vb->free = 0; /* prevent further allocs after releasing lock */
-			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
-			vb->dirty_min = 0;
-			vb->dirty_max = VMAP_BBMAP_BITS;
-			spin_lock(&vbq->lock);
-			list_del_rcu(&vb->free_list);
-			spin_unlock(&vbq->lock);
-			spin_unlock(&vb->lock);
-			list_add_tail(&vb->purge, &purge);
-		} else
-			spin_unlock(&vb->lock);
+		purge_fragmented_block(vb, vbq, &purge);
+		spin_unlock(&vb->lock);
 	}
 	rcu_read_unlock();
-
-	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
-		list_del(&vb->purge);
-		free_vmap_block(vb);
-	}
+	free_purged_blocks(&purge);
 }
 
 static void purge_fragmented_blocks_allcpus(void)
@@ -2226,12 +2241,13 @@  static void vb_free(unsigned long addr,
 
 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
 {
+	LIST_HEAD(purge_list);
 	int cpu;
 
 	if (unlikely(!vmap_initialized))
 		return;
 
-	might_sleep();
+	mutex_lock(&vmap_purge_lock);
 
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
@@ -2241,7 +2257,14 @@  static void _vm_unmap_aliases(unsigned l
 		rcu_read_lock();
 		xa_for_each(&vbq->vmap_blocks, idx, vb) {
 			spin_lock(&vb->lock);
-			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
+
+			/*
+			 * Try to purge a fragmented block first. If it's
+			 * not purgeable, check whether there is dirty
+			 * space to be flushed.
+			 */
+			if (!purge_fragmented_block(vb, vbq, &purge_list) &&
+			    vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
 				unsigned long va_start = vb->va->va_start;
 				unsigned long s, e;
 
@@ -2257,9 +2280,8 @@  static void _vm_unmap_aliases(unsigned l
 		}
 		rcu_read_unlock();
 	}
+	free_purged_blocks(&purge_list);
 
-	mutex_lock(&vmap_purge_lock);
-	purge_fragmented_blocks_allcpus();
 	if (!__purge_vmap_area_lazy(start, end) && flush)
 		flush_tlb_kernel_range(start, end);
 	mutex_unlock(&vmap_purge_lock);