diff mbox series

[2/2] mm: memory_hotplug: cleanup after removal of pfn_valid_within()

Message ID 20210713080035.7464-3-rppt@kernel.org (mailing list archive)
State New
Headers show
Series mm: remove pfn_valid_within() and CONFIG_HOLES_IN_ZONE | expand

Commit Message

Mike Rapoport July 13, 2021, 8 a.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

When test_pages_in_a_zone() used pfn_valid_within() it had some logic
surrounding the pfn_valid_within() checks.

Since pfn_valid_within() is gone, this logic can be removed.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 mm/memory_hotplug.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

Comments

David Hildenbrand July 13, 2021, 9:54 a.m. UTC | #1
On 13.07.21 10:00, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
> 
> When test_pages_in_a_zone() used pfn_valid_within() is has some logic
> surrounding pfn_valid_within() checks.
> 
> Since pfn_valid_within() is gone, this logic can be removed.
> 
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>   mm/memory_hotplug.c | 9 +++------
>   1 file changed, 3 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 1085bd03ecba..ca09045bb587 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1298,7 +1298,7 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
>   	unsigned long pfn, sec_end_pfn;
>   	struct zone *zone = NULL;
>   	struct page *page;
> -	int i;
> +
>   	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
>   	     pfn < end_pfn;
>   	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
> @@ -1307,13 +1307,10 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
>   			continue;
>   		for (; pfn < sec_end_pfn && pfn < end_pfn;
>   		     pfn += MAX_ORDER_NR_PAGES) {
> -			i = 0;
> -			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
> -				continue;
>   			/* Check if we got outside of the zone */
> -			if (zone && !zone_spans_pfn(zone, pfn + i))
> +			if (zone && !zone_spans_pfn(zone, pfn))
>   				return NULL;
> -			page = pfn_to_page(pfn + i);
> +			page = pfn_to_page(pfn);
>   			if (zone && page_zone(page) != zone)
>   				return NULL;
>   			zone = page_zone(page);
> 

I'd just squash that into the previous commit.

Reviewed-by: David Hildenbrand <david@redhat.com>
diff mbox series

Patch

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1085bd03ecba..ca09045bb587 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1298,7 +1298,7 @@  struct zone *test_pages_in_a_zone(unsigned long start_pfn,
 	unsigned long pfn, sec_end_pfn;
 	struct zone *zone = NULL;
 	struct page *page;
-	int i;
+
 	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
 	     pfn < end_pfn;
 	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
@@ -1307,13 +1307,10 @@  struct zone *test_pages_in_a_zone(unsigned long start_pfn,
 			continue;
 		for (; pfn < sec_end_pfn && pfn < end_pfn;
 		     pfn += MAX_ORDER_NR_PAGES) {
-			i = 0;
-			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
-				continue;
 			/* Check if we got outside of the zone */
-			if (zone && !zone_spans_pfn(zone, pfn + i))
+			if (zone && !zone_spans_pfn(zone, pfn))
 				return NULL;
-			page = pfn_to_page(pfn + i);
+			page = pfn_to_page(pfn);
 			if (zone && page_zone(page) != zone)
 				return NULL;
 			zone = page_zone(page);