Message ID | 20190320075301.13994-1-bhe@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | mm/sparse: Rename function related to section memmap allocation/free | expand |
On 03/20/19 at 03:53pm, Baoquan He wrote: > These functions are used allocate/free section memmap, have nothing ^ 'to' missed here, will update later. > to do with kmalloc/free during the handling. Rename them to remove > the confusion. > > Signed-off-by: Baoquan He <bhe@redhat.com> > --- > mm/sparse.c | 18 +++++++++--------- > 1 file changed, 9 insertions(+), 9 deletions(-) > > diff --git a/mm/sparse.c b/mm/sparse.c > index 054b99f74181..374206212d01 100644 > --- a/mm/sparse.c > +++ b/mm/sparse.c > @@ -579,13 +579,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) > #endif > > #ifdef CONFIG_SPARSEMEM_VMEMMAP > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > struct vmem_altmap *altmap) > { > /* This will make the necessary allocations eventually. */ > return sparse_mem_map_populate(pnum, nid, altmap); > } > -static void __kfree_section_memmap(struct page *memmap, > +static void __free_section_memmap(struct page *memmap, > struct vmem_altmap *altmap) > { > unsigned long start = (unsigned long)memmap; > @@ -603,7 +603,7 @@ static void free_map_bootmem(struct page *memmap) > } > #endif /* CONFIG_MEMORY_HOTREMOVE */ > #else > -static struct page *__kmalloc_section_memmap(void) > +static struct page *__alloc_section_memmap(void) > { > struct page *page, *ret; > unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; > @@ -624,13 +624,13 @@ static struct page *__kmalloc_section_memmap(void) > return ret; > } > > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > struct vmem_altmap *altmap) > { > - return __kmalloc_section_memmap(); > + return __alloc_section_memmap(); > } > > -static void __kfree_section_memmap(struct page *memmap, > +static void __free_section_memmap(struct page *memmap, > struct vmem_altmap 
*altmap) > { > if (is_vmalloc_addr(memmap)) > @@ -701,7 +701,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > usemap = __kmalloc_section_usemap(); > if (!usemap) > return -ENOMEM; > - memmap = kmalloc_section_memmap(section_nr, nid, altmap); > + memmap = alloc_section_memmap(section_nr, nid, altmap); > if (!memmap) { > kfree(usemap); > return -ENOMEM; > @@ -726,7 +726,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > out: > if (ret < 0) { > kfree(usemap); > - __kfree_section_memmap(memmap, altmap); > + __free_section_memmap(memmap, altmap); > } > return ret; > } > @@ -777,7 +777,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap, > if (PageSlab(usemap_page) || PageCompound(usemap_page)) { > kfree(usemap); > if (memmap) > - __kfree_section_memmap(memmap, altmap); > + __free_section_memmap(memmap, altmap); > return; > } > > -- > 2.17.2 >
On 03/20/19 at 03:53pm, Baoquan He wrote: > These functions are used allocate/free section memmap, have nothing > to do with kmalloc/free during the handling. Rename them to remove > the confusion. Sorry, wrong git operation caused this one sent. I intended to send out other single patch. Please ignore this one. It has been included in previous patchset. > > Signed-off-by: Baoquan He <bhe@redhat.com> > --- > mm/sparse.c | 18 +++++++++--------- > 1 file changed, 9 insertions(+), 9 deletions(-) > > diff --git a/mm/sparse.c b/mm/sparse.c > index 054b99f74181..374206212d01 100644 > --- a/mm/sparse.c > +++ b/mm/sparse.c > @@ -579,13 +579,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) > #endif > > #ifdef CONFIG_SPARSEMEM_VMEMMAP > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > struct vmem_altmap *altmap) > { > /* This will make the necessary allocations eventually. 
*/ > return sparse_mem_map_populate(pnum, nid, altmap); > } > -static void __kfree_section_memmap(struct page *memmap, > +static void __free_section_memmap(struct page *memmap, > struct vmem_altmap *altmap) > { > unsigned long start = (unsigned long)memmap; > @@ -603,7 +603,7 @@ static void free_map_bootmem(struct page *memmap) > } > #endif /* CONFIG_MEMORY_HOTREMOVE */ > #else > -static struct page *__kmalloc_section_memmap(void) > +static struct page *__alloc_section_memmap(void) > { > struct page *page, *ret; > unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; > @@ -624,13 +624,13 @@ static struct page *__kmalloc_section_memmap(void) > return ret; > } > > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > struct vmem_altmap *altmap) > { > - return __kmalloc_section_memmap(); > + return __alloc_section_memmap(); > } > > -static void __kfree_section_memmap(struct page *memmap, > +static void __free_section_memmap(struct page *memmap, > struct vmem_altmap *altmap) > { > if (is_vmalloc_addr(memmap)) > @@ -701,7 +701,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > usemap = __kmalloc_section_usemap(); > if (!usemap) > return -ENOMEM; > - memmap = kmalloc_section_memmap(section_nr, nid, altmap); > + memmap = alloc_section_memmap(section_nr, nid, altmap); > if (!memmap) { > kfree(usemap); > return -ENOMEM; > @@ -726,7 +726,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > out: > if (ret < 0) { > kfree(usemap); > - __kfree_section_memmap(memmap, altmap); > + __free_section_memmap(memmap, altmap); > } > return ret; > } > @@ -777,7 +777,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap, > if (PageSlab(usemap_page) || PageCompound(usemap_page)) { > kfree(usemap); > if (memmap) > - __kfree_section_memmap(memmap, altmap); > + __free_section_memmap(memmap, 
altmap); > return; > } > > -- > 2.17.2 >
On Wed, Mar 20, 2019 at 03:53:01PM +0800, Baoquan He wrote: > These functions are used allocate/free section memmap, have nothing > to do with kmalloc/free during the handling. Rename them to remove > the confusion. > > Signed-off-by: Baoquan He <bhe@redhat.com> Acked-by: Mike Rapoport <rppt@linux.ibm.com> > --- > mm/sparse.c | 18 +++++++++--------- > 1 file changed, 9 insertions(+), 9 deletions(-) > > diff --git a/mm/sparse.c b/mm/sparse.c > index 054b99f74181..374206212d01 100644 > --- a/mm/sparse.c > +++ b/mm/sparse.c > @@ -579,13 +579,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) > #endif > > #ifdef CONFIG_SPARSEMEM_VMEMMAP > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > struct vmem_altmap *altmap) > { > /* This will make the necessary allocations eventually. */ > return sparse_mem_map_populate(pnum, nid, altmap); > } > -static void __kfree_section_memmap(struct page *memmap, > +static void __free_section_memmap(struct page *memmap, > struct vmem_altmap *altmap) > { > unsigned long start = (unsigned long)memmap; > @@ -603,7 +603,7 @@ static void free_map_bootmem(struct page *memmap) > } > #endif /* CONFIG_MEMORY_HOTREMOVE */ > #else > -static struct page *__kmalloc_section_memmap(void) > +static struct page *__alloc_section_memmap(void) > { > struct page *page, *ret; > unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; > @@ -624,13 +624,13 @@ static struct page *__kmalloc_section_memmap(void) > return ret; > } > > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > struct vmem_altmap *altmap) > { > - return __kmalloc_section_memmap(); > + return __alloc_section_memmap(); > } > > -static void __kfree_section_memmap(struct page *memmap, > +static void __free_section_memmap(struct page 
*memmap, > struct vmem_altmap *altmap) > { > if (is_vmalloc_addr(memmap)) > @@ -701,7 +701,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > usemap = __kmalloc_section_usemap(); > if (!usemap) > return -ENOMEM; > - memmap = kmalloc_section_memmap(section_nr, nid, altmap); > + memmap = alloc_section_memmap(section_nr, nid, altmap); > if (!memmap) { > kfree(usemap); > return -ENOMEM; > @@ -726,7 +726,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > out: > if (ret < 0) { > kfree(usemap); > - __kfree_section_memmap(memmap, altmap); > + __free_section_memmap(memmap, altmap); > } > return ret; > } > @@ -777,7 +777,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap, > if (PageSlab(usemap_page) || PageCompound(usemap_page)) { > kfree(usemap); > if (memmap) > - __kfree_section_memmap(memmap, altmap); > + __free_section_memmap(memmap, altmap); > return; > } > > -- > 2.17.2 >
On 03/20/19 at 09:59am, Mike Rapoport wrote: > On Wed, Mar 20, 2019 at 03:53:01PM +0800, Baoquan He wrote: > > These functions are used allocate/free section memmap, have nothing > > to do with kmalloc/free during the handling. Rename them to remove > > the confusion. > > > > Signed-off-by: Baoquan He <bhe@redhat.com> > > Acked-by: Mike Rapoport <rppt@linux.ibm.com> Thanks for reviewing, Mike. I makde mistake to send this one twice. You can see it has been added into the patchset. Anyway, I will add your 'Acked-by' when repost to address those issues you pointed out. Thanks Baoquan > > > --- > > mm/sparse.c | 18 +++++++++--------- > > 1 file changed, 9 insertions(+), 9 deletions(-) > > > > diff --git a/mm/sparse.c b/mm/sparse.c > > index 054b99f74181..374206212d01 100644 > > --- a/mm/sparse.c > > +++ b/mm/sparse.c > > @@ -579,13 +579,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) > > #endif > > > > #ifdef CONFIG_SPARSEMEM_VMEMMAP > > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > > struct vmem_altmap *altmap) > > { > > /* This will make the necessary allocations eventually. 
*/ > > return sparse_mem_map_populate(pnum, nid, altmap); > > } > > -static void __kfree_section_memmap(struct page *memmap, > > +static void __free_section_memmap(struct page *memmap, > > struct vmem_altmap *altmap) > > { > > unsigned long start = (unsigned long)memmap; > > @@ -603,7 +603,7 @@ static void free_map_bootmem(struct page *memmap) > > } > > #endif /* CONFIG_MEMORY_HOTREMOVE */ > > #else > > -static struct page *__kmalloc_section_memmap(void) > > +static struct page *__alloc_section_memmap(void) > > { > > struct page *page, *ret; > > unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; > > @@ -624,13 +624,13 @@ static struct page *__kmalloc_section_memmap(void) > > return ret; > > } > > > > -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, > > +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, > > struct vmem_altmap *altmap) > > { > > - return __kmalloc_section_memmap(); > > + return __alloc_section_memmap(); > > } > > > > -static void __kfree_section_memmap(struct page *memmap, > > +static void __free_section_memmap(struct page *memmap, > > struct vmem_altmap *altmap) > > { > > if (is_vmalloc_addr(memmap)) > > @@ -701,7 +701,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > > usemap = __kmalloc_section_usemap(); > > if (!usemap) > > return -ENOMEM; > > - memmap = kmalloc_section_memmap(section_nr, nid, altmap); > > + memmap = alloc_section_memmap(section_nr, nid, altmap); > > if (!memmap) { > > kfree(usemap); > > return -ENOMEM; > > @@ -726,7 +726,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, > > out: > > if (ret < 0) { > > kfree(usemap); > > - __kfree_section_memmap(memmap, altmap); > > + __free_section_memmap(memmap, altmap); > > } > > return ret; > > } > > @@ -777,7 +777,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap, > > if (PageSlab(usemap_page) || PageCompound(usemap_page)) { > > 
kfree(usemap); > > if (memmap) > > - __kfree_section_memmap(memmap, altmap); > > + __free_section_memmap(memmap, altmap); > > return; > > } > > > > -- > > 2.17.2 > > > > -- > Sincerely yours, > Mike. >
diff --git a/mm/sparse.c b/mm/sparse.c index 054b99f74181..374206212d01 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -579,13 +579,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, struct vmem_altmap *altmap) { /* This will make the necessary allocations eventually. */ return sparse_mem_map_populate(pnum, nid, altmap); } -static void __kfree_section_memmap(struct page *memmap, +static void __free_section_memmap(struct page *memmap, struct vmem_altmap *altmap) { unsigned long start = (unsigned long)memmap; @@ -603,7 +603,7 @@ static void free_map_bootmem(struct page *memmap) } #endif /* CONFIG_MEMORY_HOTREMOVE */ #else -static struct page *__kmalloc_section_memmap(void) +static struct page *__alloc_section_memmap(void) { struct page *page, *ret; unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; @@ -624,13 +624,13 @@ static struct page *__kmalloc_section_memmap(void) return ret; } -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid, struct vmem_altmap *altmap) { - return __kmalloc_section_memmap(); + return __alloc_section_memmap(); } -static void __kfree_section_memmap(struct page *memmap, +static void __free_section_memmap(struct page *memmap, struct vmem_altmap *altmap) { if (is_vmalloc_addr(memmap)) @@ -701,7 +701,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, usemap = __kmalloc_section_usemap(); if (!usemap) return -ENOMEM; - memmap = kmalloc_section_memmap(section_nr, nid, altmap); + memmap = alloc_section_memmap(section_nr, nid, altmap); if (!memmap) { kfree(usemap); return -ENOMEM; @@ -726,7 +726,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn, out: if (ret < 
0) { kfree(usemap); - __kfree_section_memmap(memmap, altmap); + __free_section_memmap(memmap, altmap); } return ret; } @@ -777,7 +777,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap, if (PageSlab(usemap_page) || PageCompound(usemap_page)) { kfree(usemap); if (memmap) - __kfree_section_memmap(memmap, altmap); + __free_section_memmap(memmap, altmap); return; }
These functions are used to allocate/free section memmap, and have nothing to do with kmalloc/free during the handling. Rename them to remove the confusion. Signed-off-by: Baoquan He <bhe@redhat.com> --- mm/sparse.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)