
mm/sparsemem: Cleanup 'section number' data types

Message ID 156107543656.1329419.11505835211949439815.stgit@dwillia2-desk3.amr.corp.intel.com
State New, archived
Series mm/sparsemem: Cleanup 'section number' data types

Commit Message

Dan Williams June 21, 2019, 12:06 a.m. UTC
David points out that there is a mixture of 'int' and 'unsigned long'
usage for section number data types. Update the memory hotplug path to
use 'unsigned long' consistently for section numbers.

Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Reported-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
Hi Andrew,

This patch belatedly fixes up David's review feedback about moving over
to 'unsigned long' for section numbers. Let me know if you want me to
respin the full series, or if you'll just apply / fold this patch on
top.

 mm/memory_hotplug.c |   10 +++++-----
 mm/sparse.c         |    8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)
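
For context: a section number is just a page frame number shifted down, and
pfn_to_section_nr() returns 'unsigned long' (the kernel macro expands to
'(pfn) >> PFN_SECTION_SHIFT'), so storing the result in an 'int' narrows it.
A minimal standalone sketch of the pattern, not part of the patch -- the
constants mirror the x86_64 defaults, and the PFN below is a deliberately
outlandish hypothetical:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's SPARSEMEM constants. */
#define SECTION_SIZE_BITS 27	/* 128 MiB sections, the x86_64 default */
#define PAGE_SHIFT        12
#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)

/* Mirrors the kernel macro: a section number is a shifted PFN. */
static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pfn = 1UL << 50;	/* hypothetical, far beyond real hardware */
	unsigned long nr = pfn_to_section_nr(pfn);	/* 1UL << 35, kept intact */
	int truncated = pfn_to_section_nr(pfn);	/* out of range for int; typically 0 */

	printf("unsigned long: %lu, int: %d\n", nr, truncated);
	return 0;
}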

Comments

David Hildenbrand June 21, 2019, 7:23 a.m. UTC | #1
On 21.06.19 02:06, Dan Williams wrote:
> David points out that there is a mixture of 'int' and 'unsigned long'
> usage for section number data types. Update the memory hotplug path to
> use 'unsigned long' consistently for section numbers.
> 
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Oscar Salvador <osalvador@suse.de>
> Reported-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Thanks Dan!

Reviewed-by: David Hildenbrand <david@redhat.com>

Matthew Wilcox (Oracle) June 21, 2019, 2:58 p.m. UTC | #2
On Thu, Jun 20, 2019 at 05:06:46PM -0700, Dan Williams wrote:
> David points out that there is a mixture of 'int' and 'unsigned long'
> usage for section number data types. Update the memory hotplug path to
> use 'unsigned long' consistently for section numbers.

... because we're seriously considering the possibility that we'll need
more than 4 billion sections?

David Hildenbrand June 21, 2019, 3:04 p.m. UTC | #3
On 21.06.19 16:58, Matthew Wilcox wrote:
> On Thu, Jun 20, 2019 at 05:06:46PM -0700, Dan Williams wrote:
>> David points out that there is a mixture of 'int' and 'unsigned long'
>> usage for section number data types. Update the memory hotplug path to
>> use 'unsigned long' consistently for section numbers.
> 
> ... because we're seriously considering the possibility that we'll need
> more than 4 billion sections?
> 

To make it consistent ;)
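
(For scale, with the x86_64 defaults of SECTION_SIZE_BITS = 27 and
MAX_PHYSMEM_BITS = 46, the section count is 1 << (46 - 27) = 2^19 = 524,288:
enough for 64 TiB of physical address space at 128 MiB per section. Even a
52-bit physical address space needs only 2^(52 - 27), about 33.5 million
sections, nowhere near 2^31 -- so the change really is about matching
pfn_to_section_nr()'s 'unsigned long' return type, not about range.)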

Patch

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4e8e65954f31..92bc44a73fc5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -288,8 +288,8 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 		struct mhp_restrictions *restrictions)
 {
-	unsigned long i;
-	int start_sec, end_sec, err;
+	int err;
+	unsigned long nr, start_sec, end_sec;
 	struct vmem_altmap *altmap = restrictions->altmap;
 
 	if (altmap) {
@@ -310,7 +310,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 
 	start_sec = pfn_to_section_nr(pfn);
 	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	for (i = start_sec; i <= end_sec; i++) {
+	for (nr = start_sec; nr <= end_sec; nr++) {
 		unsigned long pfns;
 
 		pfns = min(nr_pages, PAGES_PER_SECTION
@@ -541,7 +541,7 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
 		    unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	unsigned long map_offset = 0;
-	int i, start_sec, end_sec;
+	unsigned long nr, start_sec, end_sec;
 
 	if (altmap)
 		map_offset = vmem_altmap_offset(altmap);
@@ -553,7 +553,7 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
 
 	start_sec = pfn_to_section_nr(pfn);
 	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	for (i = start_sec; i <= end_sec; i++) {
+	for (nr = start_sec; nr <= end_sec; nr++) {
 		unsigned long pfns;
 
 		cond_resched();
diff --git a/mm/sparse.c b/mm/sparse.c
index b77ca21a27a4..6c4eab2b2bb0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -229,21 +229,21 @@ void subsection_mask_set(unsigned long *map, unsigned long pfn,
 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
 {
 	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	int i, start_sec = pfn_to_section_nr(pfn);
+	unsigned long nr, start_sec = pfn_to_section_nr(pfn);
 
 	if (!nr_pages)
 		return;
 
-	for (i = start_sec; i <= end_sec; i++) {
+	for (nr = start_sec; nr <= end_sec; nr++) {
 		struct mem_section *ms;
 		unsigned long pfns;
 
 		pfns = min(nr_pages, PAGES_PER_SECTION
 				- (pfn & ~PAGE_SECTION_MASK));
-		ms = __nr_to_section(i);
+		ms = __nr_to_section(nr);
 		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
 
-		pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
+		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
 				pfns, subsection_map_index(pfn),
 				subsection_map_index(pfn + pfns - 1));