mm: Reduce the number of slab->folio casts

Message ID: 20240531122904.2790052-1-willy@infradead.org (mailing list archive)
State: New
Series: mm: Reduce the number of slab->folio casts

Commit Message

Matthew Wilcox May 31, 2024, 12:29 p.m. UTC
Mark a few more folio functions as taking a const folio pointer, which
allows us to remove a few places in slab which cast away the const.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/mm.h | 6 +++---
 mm/slab.h          | 4 ++--
 mm/slub.c          | 6 ++----
 3 files changed, 7 insertions(+), 9 deletions(-)
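
For context: the casts this series removes exist because slab_folio() is
const-preserving. It is defined in mm/slab.h with _Generic, so a
const struct slab * yields a const struct folio *. As long as
folio_order(), folio_shift() and folio_size() took a plain
struct folio *, const callers in slab had to cast the const away. The
stand-alone sketch below models that pattern; the struct layouts and the
folio_order() body are simplified placeholders, not the real kernel
implementations:

#include <stdio.h>

/* Simplified stand-in types; the real struct slab overlays struct page. */
struct folio { unsigned long flags; };
struct slab  { unsigned long __page_flags; };

/* Modelled on mm/slab.h: _Generic picks a const-preserving cast, so a
 * const struct slab * maps to a const struct folio *. */
#define slab_folio(s) (_Generic((s),				\
	const struct slab *:	(const struct folio *)(s),	\
	struct slab *:		(struct folio *)(s)))

/* Placeholder body; the real folio_order() decodes the order stored in
 * the large folio's metadata. */
static unsigned int folio_order(const struct folio *folio)
{
	return (unsigned int)(folio->flags & 0xff);
}

static int slab_order(const struct slab *slab)
{
	/* With a non-const folio_order(), this call site needed
	 * `folio_order((struct folio *)slab_folio(slab))`; the
	 * const-qualified prototype makes the cast unnecessary. */
	return (int)folio_order(slab_folio(slab));
}

int main(void)
{
	struct slab s = { .__page_flags = 2 };
	printf("order=%d\n", slab_order(&s));
	return 0;
}

Drop the const from folio_order()'s parameter in the sketch and the
compiler warns about discarding the const qualifier at the slab_order()
call site, which is exactly what the removed casts were papering over.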

Comments

Vlastimil Babka May 31, 2024, 2:02 p.m. UTC | #1
On 5/31/24 2:29 PM, Matthew Wilcox (Oracle) wrote:
> Mark a few more folio functions as taking a const folio pointer, which
> allows us to remove a few places in slab which cast away the const.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Added to slab/for-6.11/cleanups, at least for now.
If more incoming work outside of slab needs the newly const-qualified
helpers from mm.h, we might need to move it.

Thanks.

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a20a6be730a1..4bb841086438 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1105,7 +1105,7 @@ static inline unsigned int compound_order(struct page *page)
  *
  * Return: The order of the folio.
  */
-static inline unsigned int folio_order(struct folio *folio)
+static inline unsigned int folio_order(const struct folio *folio)
 {
 	if (!folio_test_large(folio))
 		return 0;
@@ -2148,7 +2148,7 @@ static inline struct folio *folio_next(struct folio *folio)
  * it from being split.  It is not necessary for the folio to be locked.
  * Return: The base-2 logarithm of the size of this folio.
  */
-static inline unsigned int folio_shift(struct folio *folio)
+static inline unsigned int folio_shift(const struct folio *folio)
 {
 	return PAGE_SHIFT + folio_order(folio);
 }
@@ -2161,7 +2161,7 @@ static inline unsigned int folio_shift(struct folio *folio)
  * it from being split.  It is not necessary for the folio to be locked.
  * Return: The number of bytes in this folio.
  */
-static inline size_t folio_size(struct folio *folio)
+static inline size_t folio_size(const struct folio *folio)
 {
 	return PAGE_SIZE << folio_order(folio);
 }
diff --git a/mm/slab.h b/mm/slab.h
index 5f8f47c5bee0..b16e63191578 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -166,7 +166,7 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 static inline bool slab_test_pfmemalloc(const struct slab *slab)
 {
-	return folio_test_active((struct folio *)slab_folio(slab));
+	return folio_test_active(slab_folio(slab));
 }
 
 static inline void slab_set_pfmemalloc(struct slab *slab)
@@ -211,7 +211,7 @@ static inline struct slab *virt_to_slab(const void *addr)
 
 static inline int slab_order(const struct slab *slab)
 {
-	return folio_order((struct folio *)slab_folio(slab));
+	return folio_order(slab_folio(slab));
 }
 
 static inline size_t slab_size(const struct slab *slab)
diff --git a/mm/slub.c b/mm/slub.c
index 95e0a3332c44..b8ba068ca079 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -962,11 +962,9 @@ void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_slab_info(const struct slab *slab)
 {
-	struct folio *folio = (struct folio *)slab_folio(slab);
-
 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
 	       slab, slab->objects, slab->inuse, slab->freelist,
-	       folio_flags(folio, 0));
+	       &slab->__page_flags);
 }
 
 /*
@@ -2532,7 +2530,7 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
  */
 static inline bool slab_test_node_partial(const struct slab *slab)
 {
-	return folio_test_workingset((struct folio *)slab_folio(slab));
+	return folio_test_workingset(slab_folio(slab));
 }
 
 static inline void slab_set_node_partial(struct slab *slab)
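
A note on the print_slab_info() hunk: instead of const-casting so that
folio_flags(folio, 0) can be called, it now passes &slab->__page_flags to
the %pGp format directly. That works because struct slab deliberately
overlays struct page, so __page_flags occupies the very word that
folio_flags() would have returned a pointer to. A minimal userspace
sketch of that aliasing, reusing the simplified stand-in types from the
sketch above:

#include <stdio.h>

/* Simplified stand-ins for the overlaid kernel structures. */
struct folio { unsigned long flags; };
struct slab  { unsigned long __page_flags; };

int main(void)
{
	struct slab slab = { .__page_flags = 0x42 };
	const struct folio *folio = (const struct folio *)&slab;

	/* Both expressions name the same storage, so %pGp sees the same
	 * flags word either way. */
	printf("same word: %s\n",
	       (const void *)&slab.__page_flags ==
	       (const void *)&folio->flags ? "yes" : "no");
	return 0;
}

The kernel is built with -fno-strict-aliasing, which is what makes this
overlay well defined there; the sketch only compares addresses and so
stays within standard C.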