[2/2] mm: slub: use DEFINE_RAW_SPINLOCK to init object_map_lock

Message ID 1621938235-11947-2-git-send-email-yejunedeng@gmail.com (mailing list archive)
State New, archived
Series [1/2] mm: slab/slub: use raw_spinlock_t to define list_lock

Commit Message

Yejune Deng May 25, 2021, 10:23 a.m. UTC
Use DEFINE_RAW_SPINLOCK instead of DEFINE_SPINLOCK for object_map_lock,
so that the lock's critical section cannot be preempted on both mainline
and PREEMPT_RT kernels.

Signed-off-by: Yejune Deng <yejunedeng@gmail.com>
---
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
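
For background: on PREEMPT_RT, spinlock_t is substituted by a sleeping
rt_mutex, while raw_spinlock_t remains a true spinning lock on every
kernel. get_map() asserts VM_BUG_ON(!irqs_disabled()), and a sleeping
lock must never be taken with interrupts disabled, which is why the raw
variant is needed there. A minimal sketch of that rule (illustrative
only, not from the patch; the lock and function names are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(normal_lock);	/* becomes a sleeping lock on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(raw_lock);	/* spins and disables preemption everywhere */

static void locking_rule_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * spin_lock(&normal_lock) here would be a bug on PREEMPT_RT:
	 * taking a sleeping lock with interrupts disabled is invalid.
	 */
	raw_spin_lock(&raw_lock);	/* fine: never sleeps */
	raw_spin_unlock(&raw_lock);
	local_irq_restore(flags);
}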

Comments

Vlastimil Babka May 25, 2021, 10:46 a.m. UTC | #1
On 5/25/21 12:23 PM, Yejune Deng wrote:
> Use DEFINE_RAW_SPINLOCK instead of DEFINE_SPINLOCK for object_map_lock,
> so that the lock's critical section cannot be preempted on both mainline
> and PREEMPT_RT kernels.
> 
> Signed-off-by: Yejune Deng <yejunedeng@gmail.com>

The RT tree also has such a patch, with (IMHO) a more thorough description:

https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git/tree/patches/0002-mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch?h=linux-5.12.y-rt-patches

I was planning to include that in the next version of my series as that
will indeed be necessary.

> ---
>  mm/slub.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index c2f63c3..995f3d0 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -445,7 +445,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
>  
>  #ifdef CONFIG_SLUB_DEBUG
>  static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
> -static DEFINE_SPINLOCK(object_map_lock);
> +static DEFINE_RAW_SPINLOCK(object_map_lock);
>  
>  #if IS_ENABLED(CONFIG_KUNIT)
>  static bool slab_add_kunit_errors(void)
> @@ -481,7 +481,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
>  
>  	VM_BUG_ON(!irqs_disabled());
>  
> -	spin_lock(&object_map_lock);
> +	raw_spin_lock(&object_map_lock);
>  
>  	bitmap_zero(object_map, page->objects);
>  
> @@ -494,7 +494,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
>  static void put_map(unsigned long *map) __releases(&object_map_lock)
>  {
>  	VM_BUG_ON(map != object_map);
> -	spin_unlock(&object_map_lock);
> +	raw_spin_unlock(&object_map_lock);
>  }
>  
>  static inline unsigned int size_from_object(struct kmem_cache *s)
>
Andrew Morton May 25, 2021, 11:16 p.m. UTC | #2
On Tue, 25 May 2021 12:46:35 +0200 Vlastimil Babka <vbabka@suse.cz> wrote:

> On 5/25/21 12:23 PM, Yejune Deng wrote:
> > Use DEFINE_RAW_SPINLOCK instead of DEFINE_SPINLOCK for object_map_lock,
> > so that the lock's critical section cannot be preempted on both mainline
> > and PREEMPT_RT kernels.
> > 
> > Signed-off-by: Yejune Deng <yejunedeng@gmail.com>
> 
> RT tree also has such patch, with IMHO more thorough description:

Yes please, a more thorough description is needed.  The description
provided with this patch could be applied to every spinlock in the
kernel!

Patch

diff --git a/mm/slub.c b/mm/slub.c
index c2f63c3..995f3d0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -445,7 +445,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_SPINLOCK(object_map_lock);
+static DEFINE_RAW_SPINLOCK(object_map_lock);
 
 #if IS_ENABLED(CONFIG_KUNIT)
 static bool slab_add_kunit_errors(void)
@@ -481,7 +481,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 
 	VM_BUG_ON(!irqs_disabled());
 
-	spin_lock(&object_map_lock);
+	raw_spin_lock(&object_map_lock);
 
 	bitmap_zero(object_map, page->objects);
 
@@ -494,7 +494,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 static void put_map(unsigned long *map) __releases(&object_map_lock)
 {
 	VM_BUG_ON(map != object_map);
-	spin_unlock(&object_map_lock);
+	raw_spin_unlock(&object_map_lock);
 }
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
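
For reference, the caller-side pattern that motivates the irqs_disabled()
assertion and the raw lock, modeled on SLUB's own debug helpers (e.g.
list_slab_objects() in mm/slub.c); the function name below is hypothetical:

static void report_allocated_objects(struct kmem_cache *s, struct page *page)
{
	void *addr = page_address(page);
	unsigned long *map;
	void *p;

	/*
	 * Debug callers already run with IRQs off (e.g. under the node's
	 * list_lock), so object_map_lock must not sleep -- hence raw.
	 */
	map = get_map(s, page);	/* takes object_map_lock; sets a bit per free object */
	for_each_object(p, s, addr, page->objects) {
		if (!test_bit(__obj_to_index(s, addr, p), map))
			pr_info("allocated object at %p\n", p);
	}
	put_map(map);		/* releases object_map_lock */
}

get_map() hands back the single static object_map bitmap, which is why the
lock must be held across the whole walk rather than just the bitmap fill.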