
[RESEND,v2] mm/mm_init: use helper macros BITS_PER_LONG and BITS_PER_BYTE

Message ID: 20230807023528.325191-1-linmiaohe@huawei.com
State: New
Series: [RESEND,v2] mm/mm_init: use helper macros BITS_PER_LONG and BITS_PER_BYTE

Commit Message

Miaohe Lin Aug. 7, 2023, 2:35 a.m. UTC
It's more readable to use the helper macros BITS_PER_LONG and BITS_PER_BYTE
than to open-code 8 * sizeof(unsigned long) and a bare 8.
No functional change intended.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
---
v2:
  Use BITS_PER_BYTE per Mike.
  Collect Reviewed-by tag from David.
  Thanks to both.
---
 mm/mm_init.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
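
For reference, here is a minimal sketch of what the two macros expand to,
simplified from include/linux/bits.h and include/asm-generic/bitsperlong.h
(the 64 assumes a 64-bit build; 32-bit architectures get 32):

    /* Simplified sketch, not the verbatim kernel definitions. */
    #define BITS_PER_BYTE  8
    #define BITS_PER_LONG  64   /* 32 on 32-bit kernels */

    /*
     * The substitution is a compile-time no-op:
     *   8 * sizeof(unsigned long)  ==  BITS_PER_LONG
     *   usemapsize / 8             ==  usemapsize / BITS_PER_BYTE
     * so shift in mminit_verify_pageflags_layout() and the result of
     * usemap_size() are unchanged; only the intent becomes explicit.
     */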

Comments

Mike Rapoport Aug. 7, 2023, 5:31 a.m. UTC | #1
On Mon, Aug 07, 2023 at 10:35:28AM +0800, Miaohe Lin wrote:
> It's more readable to use the helper macros BITS_PER_LONG and BITS_PER_BYTE
> than to open-code 8 * sizeof(unsigned long) and a bare 8.
> No functional change intended.
> 
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> Reviewed-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
> v2:
>   Use BITS_PER_BYTE per Mike.
>   Collect Reviewed-by tag from David.
>   Thanks to both.
> ---
>  mm/mm_init.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 66aca3f6accd..93b1febd4a32 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
>  	int shift, width;
>  	unsigned long or_mask, add_mask;
>  
> -	shift = 8 * sizeof(unsigned long);
> +	shift = BITS_PER_LONG;
>  	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
>  		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
>  	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
> @@ -1431,9 +1431,9 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
>  	usemapsize = roundup(zonesize, pageblock_nr_pages);
>  	usemapsize = usemapsize >> pageblock_order;
>  	usemapsize *= NR_PAGEBLOCK_BITS;
> -	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
> +	usemapsize = roundup(usemapsize, BITS_PER_LONG);
>  
> -	return usemapsize / 8;
> +	return usemapsize / BITS_PER_BYTE;
>  }
>  
>  static void __ref setup_usemap(struct zone *zone)
> -- 
> 2.33.0
>

Patch

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 66aca3f6accd..93b1febd4a32 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
 	int shift, width;
 	unsigned long or_mask, add_mask;
 
-	shift = 8 * sizeof(unsigned long);
+	shift = BITS_PER_LONG;
 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
@@ -1431,9 +1431,9 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
 	usemapsize = roundup(zonesize, pageblock_nr_pages);
 	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
-	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
+	usemapsize = roundup(usemapsize, BITS_PER_LONG);
 
-	return usemapsize / 8;
+	return usemapsize / BITS_PER_BYTE;
 }
 
 static void __ref setup_usemap(struct zone *zone)
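
As a worked example of the usemap_size() arithmetic above, the walkthrough
below uses a hypothetical zone of 1,000,000 pages and assumes typical
x86-64 values (pageblock_order = 9, so pageblock_nr_pages = 512;
NR_PAGEBLOCK_BITS = 4; BITS_PER_LONG = 64):

    /*
     * Hypothetical zone of 1,000,000 pages, assumptions as above:
     *
     *   roundup(1000000, 512)   ->  1000448   pages, whole pageblocks
     *   1000448 >> 9            ->  1954      pageblocks in the zone
     *   1954 * 4                ->  7816      usemap bits needed
     *   roundup(7816, 64)       ->  7872      bits, whole longs
     *   7872 / BITS_PER_BYTE    ->  984       bytes to allocate
     */

Rounding up to whole longs matters because the usemap is manipulated with
word-sized bitmap operations; the final division by BITS_PER_BYTE just
converts that bit count into bytes for the allocator.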