
[v2,10/10] mm, cma: use spinlock instead of mutex

Message ID 1402543307-29800-11-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive)
State New, archived

Commit Message

Joonsoo Kim June 12, 2014, 3:21 a.m. UTC
Currently, we take a mutex to manipulate the bitmap.
This operation is very simple and short, so there is no need to sleep
when contended. Convert the mutex to a spinlock.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
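
[Editorial aside: the trade-off is that a contending task now busy-waits with
preemption disabled instead of sleeping, so nothing inside the critical section
may block. A minimal sketch of the resulting pattern follows; it is simplified
from the patch below, with stand-in names, and is not the exact mm/cma.c code.]

    #include <linux/spinlock.h>
    #include <linux/bitmap.h>

    /* Simplified stand-in for struct cma; names shortened for illustration. */
    struct area {
            unsigned long *bitmap;
            spinlock_t lock;                /* was: struct mutex lock; */
    };

    static void area_clear(struct area *a, unsigned long start, unsigned long nr)
    {
            /*
             * The whole critical section is one bitmap_clear(): no allocation,
             * no I/O, nothing that can sleep, so a spinlock is sufficient and
             * a contending CPU only spins for the duration of the bit ops.
             */
            spin_lock(&a->lock);
            bitmap_clear(a->bitmap, start, nr);
            spin_unlock(&a->lock);
    }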

Comments

Minchan Kim June 12, 2014, 7:40 a.m. UTC | #1
On Thu, Jun 12, 2014 at 12:21:47PM +0900, Joonsoo Kim wrote:
> Currently, we take a mutex to manipulate the bitmap.
> This operation is very simple and short, so there is no need to sleep
> when contended. Convert the mutex to a spinlock.

I'm not sure this is always a win.
You may remember the similar issue we discussed internally about bitmap
searching in the vmap code, which was really painful once it was
fragmented. So, at the very least, we need numbers if you really want
this change, and I'd like those numbers to come from an ARM machine,
the most popular platform for CMA at the moment.
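
[Editorial aside: the search in question runs under the new spinlock in
cma_alloc(). One hypothetical way to get the numbers asked for here is to
time how long the lock is held across the search. The ktime calls and the
pr_debug line below are assumed instrumentation, not part of the patch; the
local variables are the ones already used in cma_alloc(), and the snippet
additionally needs <linux/ktime.h>.]

            /* Hypothetical instrumentation inside cma_alloc()'s search loop. */
            ktime_t t0;

            spin_lock(&cma->lock);
            t0 = ktime_get();
            bitmapno = bitmap_find_next_zero_area(cma->bitmap, bitmap_maxno,
                                    start, nr_bits, mask);
            pr_debug("cma: bitmap search took %lld ns under the lock\n",
                     ktime_to_ns(ktime_sub(ktime_get(), t0)));
            if (bitmapno >= bitmap_maxno) {
                    spin_unlock(&cma->lock);
                    break;
            }
            bitmap_set(cma->bitmap, bitmapno, nr_bits);
            spin_unlock(&cma->lock);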

> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> diff --git a/mm/cma.c b/mm/cma.c
> index 22a5b23..3085e8c 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -27,6 +27,7 @@
>  #include <linux/memblock.h>
>  #include <linux/err.h>
>  #include <linux/mm.h>
> +#include <linux/spinlock.h>
>  #include <linux/mutex.h>
>  #include <linux/sizes.h>
>  #include <linux/slab.h>
> @@ -36,7 +37,7 @@ struct cma {
>  	unsigned long	count;
>  	unsigned long	*bitmap;
>  	int order_per_bit; /* Order of pages represented by one bit */
> -	struct mutex	lock;
> +	spinlock_t	lock;
>  };
>  
>  /*
> @@ -72,9 +73,9 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>  	bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
>  	nr_bits = cma_bitmap_pages_to_bits(cma, count);
>  
> -	mutex_lock(&cma->lock);
> +	spin_lock(&cma->lock);
>  	bitmap_clear(cma->bitmap, bitmapno, nr_bits);
> -	mutex_unlock(&cma->lock);
> +	spin_unlock(&cma->lock);
>  }
>  
>  static int __init cma_activate_area(struct cma *cma)
> @@ -112,7 +113,7 @@ static int __init cma_activate_area(struct cma *cma)
>  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
>  	} while (--i);
>  
> -	mutex_init(&cma->lock);
> +	spin_lock_init(&cma->lock);
>  	return 0;
>  
>  err:
> @@ -261,11 +262,11 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
>  	nr_bits = cma_bitmap_pages_to_bits(cma, count);
>  
>  	for (;;) {
> -		mutex_lock(&cma->lock);
> +		spin_lock(&cma->lock);
>  		bitmapno = bitmap_find_next_zero_area(cma->bitmap,
>  					bitmap_maxno, start, nr_bits, mask);
>  		if (bitmapno >= bitmap_maxno) {
> -			mutex_unlock(&cma->lock);
> +			spin_unlock(&cma->lock);
>  			break;
>  		}
>  		bitmap_set(cma->bitmap, bitmapno, nr_bits);
> @@ -274,7 +275,7 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
>  		 * our exclusive use. If the migration fails we will take the
>  		 * lock again and unmark it.
>  		 */
> -		mutex_unlock(&cma->lock);
> +		spin_unlock(&cma->lock);
>  
>  		pfn = cma->base_pfn + (bitmapno << cma->order_per_bit);
>  		mutex_lock(&cma_mutex);
> -- 
> 1.7.9.5
> 
Joonsoo Kim June 12, 2014, 7:56 a.m. UTC | #2
On Thu, Jun 12, 2014 at 04:40:29PM +0900, Minchan Kim wrote:
> On Thu, Jun 12, 2014 at 12:21:47PM +0900, Joonsoo Kim wrote:
> > Currently, we take a mutex to manipulate the bitmap.
> > This operation is very simple and short, so there is no need to sleep
> > when contended. Convert the mutex to a spinlock.
> 
> I'm not sure this is always a win.
> You may remember the similar issue we discussed internally about bitmap
> searching in the vmap code, which was really painful once it was
> fragmented. So, at the very least, we need numbers if you really want
> this change, and I'd like those numbers to come from an ARM machine,
> the most popular platform for CMA at the moment.

Good point! Agreed. I will drop this one in the next spin and re-submit it
in a separate patchset after some testing.

Thanks.
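
[Editorial aside: the testing could be driven by a small, hypothetical stress
module that repeatedly allocates and frees CMA pages and reports the average
latency. Everything below (function name, device handle, iteration counts) is
assumed for illustration, and it uses the long-standing
dma_alloc_from_contiguous()/dma_release_from_contiguous() interface rather
than this series' cma_alloc() directly.]

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/ktime.h>
    #include <linux/dma-contiguous.h>

    /* Hypothetical stress loop: time repeated CMA allocations for a device
     * that has a CMA region attached (e.g. an ARM platform device). */
    static int cma_stress(struct device *dev, int nr_pages, int iters)
    {
            s64 total_ns = 0;
            int i;

            for (i = 0; i < iters; i++) {
                    struct page *p;
                    ktime_t t0;

                    t0 = ktime_get();
                    p = dma_alloc_from_contiguous(dev, nr_pages, 0);
                    total_ns += ktime_to_ns(ktime_sub(ktime_get(), t0));
                    if (!p)
                            return -ENOMEM;
                    dma_release_from_contiguous(dev, p, nr_pages);
            }

            pr_info("cma: %d allocs of %d pages, avg %lld ns each\n",
                    iters, nr_pages, total_ns / iters);
            return 0;
    }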

Patch

diff --git a/mm/cma.c b/mm/cma.c
index 22a5b23..3085e8c 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -27,6 +27,7 @@ 
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -36,7 +37,7 @@  struct cma {
 	unsigned long	count;
 	unsigned long	*bitmap;
 	int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex	lock;
+	spinlock_t	lock;
 };
 
 /*
@@ -72,9 +73,9 @@  static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 	bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
 	nr_bits = cma_bitmap_pages_to_bits(cma, count);
 
-	mutex_lock(&cma->lock);
+	spin_lock(&cma->lock);
 	bitmap_clear(cma->bitmap, bitmapno, nr_bits);
-	mutex_unlock(&cma->lock);
+	spin_unlock(&cma->lock);
 }
 
 static int __init cma_activate_area(struct cma *cma)
@@ -112,7 +113,7 @@  static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
 
-	mutex_init(&cma->lock);
+	spin_lock_init(&cma->lock);
 	return 0;
 
 err:
@@ -261,11 +262,11 @@  struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 	nr_bits = cma_bitmap_pages_to_bits(cma, count);
 
 	for (;;) {
-		mutex_lock(&cma->lock);
+		spin_lock(&cma->lock);
 		bitmapno = bitmap_find_next_zero_area(cma->bitmap,
 					bitmap_maxno, start, nr_bits, mask);
 		if (bitmapno >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
+			spin_unlock(&cma->lock);
 			break;
 		}
 		bitmap_set(cma->bitmap, bitmapno, nr_bits);
@@ -274,7 +275,7 @@  struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 		 * our exclusive use. If the migration fails we will take the
 		 * lock again and unmark it.
 		 */
-		mutex_unlock(&cma->lock);
+		spin_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmapno << cma->order_per_bit);
 		mutex_lock(&cma_mutex);