[v1,mm-unstable] mm/migrate: Remove slab checks in isolate_movable_page()

Message ID: 20241210124807.8584-1-42.hyeyoo@gmail.com
State: New

Commit Message

Hyeonggon Yoo Dec. 10, 2024, 12:48 p.m. UTC
Commit 8b8817630ae8 ("mm/migrate: make isolate_movable_page() skip slab
pages") introduced slab checks to prevent mis-identification of slab
pages as movable kernel pages.

However, after Matthew's frozen folio series, these slab checks are no
longer necessary: slab folios are frozen at a refcount of zero while in
use, so isolate_movable_page() cannot take a reference on them and bails
out before these checks are ever reached. Remove the redundant slab
checks and the associated memory barriers.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/migrate.c | 8 --------
 mm/slub.c    | 4 ----
 2 files changed, 12 deletions(-)
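
For context, a minimal userspace model of why the removed checks are now
unreachable. This is an illustrative sketch built on two assumptions taken
from the changelog, not kernel code: frozen slab folios keep a refcount of
zero while in use, and the folio_get_nontail_page() call at the top of
isolate_movable_page() acts as a try-get that fails when the refcount is
zero.

/* model.c - build with: cc -o model model.c */
#include <stdbool.h>
#include <stdio.h>

struct folio_model {
        int refcount;           /* frozen slab folios keep this at 0 */
        bool is_slab;
};

/*
 * Models folio_get_nontail_page(): take a reference only when the
 * refcount is already non-zero.
 */
static bool try_get(struct folio_model *f)
{
        if (f->refcount == 0)
                return false;   /* frozen: no reference possible */
        f->refcount++;
        return true;
}

static bool isolate_movable_model(struct folio_model *f)
{
        /*
         * Slab folios already fail here, so an explicit
         * folio_test_slab() re-check further down is redundant.
         */
        if (!try_get(f))
                return false;
        return true;
}

int main(void)
{
        struct folio_model slab    = { .refcount = 0, .is_slab = true  };
        struct folio_model movable = { .refcount = 1, .is_slab = false };

        printf("slab isolated:    %d\n", isolate_movable_model(&slab));
        printf("movable isolated: %d\n", isolate_movable_model(&movable));
        return 0;
}

Built and run, this prints 0 for the slab folio and 1 for the movable one:
a frozen slab folio never survives the try-get, so it can never reach a
folio_test_slab() re-check.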

Comments

David Hildenbrand Dec. 10, 2024, 2:27 p.m. UTC | #1
On 10.12.24 13:48, Hyeonggon Yoo wrote:
> Commit 8b8817630ae8 ("mm/migrate: make isolate_movable_page() skip slab
> pages") introduced slab checks to prevent mis-identification of slab
> pages as movable kernel pages.
> 
> However, after Matthew's frozen folio series, these slab checks are no
> longer necessary: slab folios are frozen at a refcount of zero while in
> use, so isolate_movable_page() cannot take a reference on them and bails
> out before these checks are ever reached. Remove the redundant slab
> checks and the associated memory barriers.
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
>   mm/migrate.c | 8 --------
>   mm/slub.c    | 4 ----
>   2 files changed, 12 deletions(-)
> 
> diff --git a/mm/migrate.c b/mm/migrate.c
> index e9e00d1d1d19..32cc8e0b1cce 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -68,10 +68,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>   	if (!folio)
>   		goto out;
>   
> -	if (unlikely(folio_test_slab(folio)))
> -		goto out_putfolio;
> -	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
> -	smp_rmb();
>   	/*
>   	 * Check movable flag before taking the page lock because
>   	 * we use non-atomic bitops on newly allocated page flags so
> @@ -79,10 +75,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>   	 */
>   	if (unlikely(!__folio_test_movable(folio)))
>   		goto out_putfolio;
> -	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
> -	smp_rmb();
> -	if (unlikely(folio_test_slab(folio)))
> -		goto out_putfolio;
>   
>   	/*
>   	 * As movable pages are not isolated from LRU lists, concurrent
> diff --git a/mm/slub.c b/mm/slub.c
> index f62c829b7b6b..ee94baba12f7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2414,8 +2414,6 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
>   
>   	slab = folio_slab(folio);
>   	__folio_set_slab(folio);
> -	/* Make the flag visible before any changes to folio->mapping */
> -	smp_wmb();
>   	if (folio_is_pfmemalloc(folio))
>   		slab_set_pfmemalloc(slab);
>   
> @@ -2636,8 +2634,6 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
>   
>   	__slab_clear_pfmemalloc(slab);
>   	folio->mapping = NULL;
> -	/* Make the mapping reset visible before clearing the flag */
> -	smp_wmb();
>   	__folio_clear_slab(folio);
>   	mm_account_reclaimed_pages(pages);
>   	unaccount_slab(slab, order, s);


Nice

Acked-by: David Hildenbrand <david@redhat.com>

Vlastimil Babka Dec. 11, 2024, 8:10 a.m. UTC | #2
On 12/10/24 13:48, Hyeonggon Yoo wrote:
> Commit 8b8817630ae8 ("mm/migrate: make isolate_movable_page() skip slab
> pages") introduced slab checks to prevent mis-identification of slab
> pages as movable kernel pages.
> 
> However, after Matthew's frozen folio series, these slab checks are no
> longer necessary: slab folios are frozen at a refcount of zero while in
> use, so isolate_movable_page() cannot take a reference on them and bails
> out before these checks are ever reached. Remove the redundant slab
> checks and the associated memory barriers.
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Thanks!

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index e9e00d1d1d19..32cc8e0b1cce 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -68,10 +68,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (!folio)
 		goto out;
 
-	if (unlikely(folio_test_slab(folio)))
-		goto out_putfolio;
-	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
-	smp_rmb();
 	/*
 	 * Check movable flag before taking the page lock because
 	 * we use non-atomic bitops on newly allocated page flags so
@@ -79,10 +75,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	 */
 	if (unlikely(!__folio_test_movable(folio)))
 		goto out_putfolio;
-	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
-	smp_rmb();
-	if (unlikely(folio_test_slab(folio)))
-		goto out_putfolio;
 
 	/*
 	 * As movable pages are not isolated from LRU lists, concurrent
diff --git a/mm/slub.c b/mm/slub.c
index f62c829b7b6b..ee94baba12f7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2414,8 +2414,6 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 
 	slab = folio_slab(folio);
 	__folio_set_slab(folio);
-	/* Make the flag visible before any changes to folio->mapping */
-	smp_wmb();
 	if (folio_is_pfmemalloc(folio))
 		slab_set_pfmemalloc(slab);
 
@@ -2636,8 +2634,6 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 
 	__slab_clear_pfmemalloc(slab);
 	folio->mapping = NULL;
-	/* Make the mapping reset visible before clearing the flag */
-	smp_wmb();
 	__folio_clear_slab(folio);
 	mm_account_reclaimed_pages(pages);
 	unaccount_slab(slab, order, s);
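
A closing note on the deleted barriers: the comments in the removed lines
describe a publish/observe pairing between the slab allocator and the
migration path. Below is a compilable C11 model of that pairing; it is an
illustrative sketch of the old ordering reconstructed from those comments,
not the kernel implementation.

/* barriers.c - build with: cc -std=c11 -o barriers barriers.c */
#include <stdatomic.h>
#include <stdbool.h>

struct folio_model {
        _Atomic bool slab_flag;     /* what folio_test_slab() reads       */
        _Atomic(void *) mapping;    /* what __folio_test_movable() reads  */
};

/*
 * Writer, modeled on the old alloc_slab_page() ordering: publish the
 * slab flag before any change to folio->mapping.
 */
static void alloc_side(struct folio_model *f)
{
        atomic_store_explicit(&f->slab_flag, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* was smp_wmb() */
        atomic_store_explicit(&f->mapping, (void *)0, memory_order_relaxed);
}

/*
 * Reader, modeled on the checks deleted from isolate_movable_page(): if
 * the load of mapping observes the allocator's store, the paired fences
 * guarantee the subsequent load also sees the slab flag set.
 */
static bool reader_sees_slab(struct folio_model *f)
{
        (void)atomic_load_explicit(&f->mapping, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* was smp_rmb() */
        return atomic_load_explicit(&f->slab_flag, memory_order_relaxed);
}

int main(void)
{
        struct folio_model f = { .slab_flag = false, .mapping = (void *)0 };

        alloc_side(&f);
        return reader_sees_slab(&f) ? 0 : 1;
}

Once slab folios are frozen, isolate_movable_page() can no longer take a
reference on a slab folio in the first place, so the reader side above is
never reached for slab pages and both fences become dead weight, which is
exactly what the patch removes.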