diff mbox series

[2/7] mm/sparse.c: Introduce a new function clear_subsection_map()

Message ID 20200209104826.3385-3-bhe@redhat.com (mailing list archive)
State New, archived
Headers show
Series mm/hotplug: Only use subsection in VMEMMAP case and fix hot add/remove failure in SPARSEMEM|!VMEMMAP case | expand

Commit Message

Baoquan He Feb. 9, 2020, 10:48 a.m. UTC
Wrap the codes clearing subsection map of one memory region in
section_deactivate() into clear_subsection_map().

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/sparse.c | 44 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 37 insertions(+), 7 deletions(-)

Comments

Wei Yang Feb. 9, 2020, 11:07 p.m. UTC | #1
On Sun, Feb 09, 2020 at 06:48:21PM +0800, Baoquan He wrote:
>Wrap the codes clearing subsection map of one memory region in
>section_deactivate() into clear_subsection_map().
>

Patch 1 and 2 serve the same purpose -- to #ifdef the VMEMMAP.

I suggest to merge these two.

>Signed-off-by: Baoquan He <bhe@redhat.com>
>---
> mm/sparse.c | 44 +++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 37 insertions(+), 7 deletions(-)
>
>diff --git a/mm/sparse.c b/mm/sparse.c
>index 9ad741ccbeb6..696f6b9f706e 100644
>--- a/mm/sparse.c
>+++ b/mm/sparse.c
>@@ -726,14 +726,25 @@ static void free_map_bootmem(struct page *memmap)
> }
> #endif /* CONFIG_SPARSEMEM_VMEMMAP */
> 
>-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>-		struct vmem_altmap *altmap)
>+/**
>+ * clear_subsection_map - Clear subsection map of one memory region
>+ *
>+ * @pfn - start pfn of the memory range
>+ * @nr_pages - number of pfns to add in the region
>+ *
>+ * This is only intended for hotplug, and clear the related subsection
>+ * map inside one section.
>+ *
>+ * Return:
>+ * * -EINVAL	- Section already deactived.
>+ * * 0		- Subsection map is emptied.
>+ * * 1		- Subsection map is not empty.
>+ */
>+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
> {
> 	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
> 	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
> 	struct mem_section *ms = __pfn_to_section(pfn);
>-	bool section_is_early = early_section(ms);
>-	struct page *memmap = NULL;
> 	unsigned long *subsection_map = ms->usage
> 		? &ms->usage->subsection_map[0] : NULL;
> 
>@@ -744,8 +755,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> 	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
> 				"section already deactivated (%#lx + %ld)\n",
> 				pfn, nr_pages))
>-		return;
>+		return -EINVAL;
>+
>+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
> 
>+	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION))
>+		return 0;
>+
>+	return 1;
>+}
>+
>+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>+		struct vmem_altmap *altmap)
>+{
>+	struct mem_section *ms = __pfn_to_section(pfn);
>+	bool section_is_early = early_section(ms);
>+	struct page *memmap = NULL;
>+	int rc;
>+
>+
>+	rc = clear_subsection_map(pfn, nr_pages);
>+	if(IS_ERR_VALUE((unsigned long)rc))
>+		return;
> 	/*
> 	 * There are 3 cases to handle across two configurations
> 	 * (SPARSEMEM_VMEMMAP={y,n}):
>@@ -763,8 +794,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> 	 *
> 	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
> 	 */
>-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
>-	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
>+	if (!rc) {
> 		unsigned long section_nr = pfn_to_section_nr(pfn);
> 
> 		/*
>-- 
>2.17.2
Baoquan He Feb. 10, 2020, 3:36 a.m. UTC | #2
On 02/10/20 at 07:07am, Wei Yang wrote:
> On Sun, Feb 09, 2020 at 06:48:21PM +0800, Baoquan He wrote:
> >Wrap the codes clearing subsection map of one memory region in
> >section_deactivate() into clear_subsection_map().
> >
> 
> Patch 1 and 2 server the same purpose -- to #ifdef the VMEMMAP.

Hmm, I didn't say patch 1 and 2 are preparation work because they had
better be done even if we don't take off subsection map from
SPARSEMEM|!VMEMMAP case. Wrapping the subsection map filling and clearing
codes into separate new functions, can make section_activate() and
section_deactivate() much clearer on code logic.

If you don't mind, I will keep them for now, and see what other people
will say.

Thanks
Baoquan

> 
> >---
> > mm/sparse.c | 44 +++++++++++++++++++++++++++++++++++++-------
> > 1 file changed, 37 insertions(+), 7 deletions(-)
> >
> >diff --git a/mm/sparse.c b/mm/sparse.c
> >index 9ad741ccbeb6..696f6b9f706e 100644
> >--- a/mm/sparse.c
> >+++ b/mm/sparse.c
> >@@ -726,14 +726,25 @@ static void free_map_bootmem(struct page *memmap)
> > }
> > #endif /* CONFIG_SPARSEMEM_VMEMMAP */
> > 
> >-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> >-		struct vmem_altmap *altmap)
> >+/**
> >+ * clear_subsection_map - Clear subsection map of one memory region
> >+ *
> >+ * @pfn - start pfn of the memory range
> >+ * @nr_pages - number of pfns to add in the region
> >+ *
> >+ * This is only intended for hotplug, and clear the related subsection
> >+ * map inside one section.
> >+ *
> >+ * Return:
> >+ * * -EINVAL	- Section already deactived.
> >+ * * 0		- Subsection map is emptied.
> >+ * * 1		- Subsection map is not empty.
> >+ */
> >+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
> > {
> > 	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
> > 	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
> > 	struct mem_section *ms = __pfn_to_section(pfn);
> >-	bool section_is_early = early_section(ms);
> >-	struct page *memmap = NULL;
> > 	unsigned long *subsection_map = ms->usage
> > 		? &ms->usage->subsection_map[0] : NULL;
> > 
> >@@ -744,8 +755,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> > 	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
> > 				"section already deactivated (%#lx + %ld)\n",
> > 				pfn, nr_pages))
> >-		return;
> >+		return -EINVAL;
> >+
> >+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
> > 
> >+	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION))
> >+		return 0;
> >+
> >+	return 1;
> >+}
> >+
> >+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> >+		struct vmem_altmap *altmap)
> >+{
> >+	struct mem_section *ms = __pfn_to_section(pfn);
> >+	bool section_is_early = early_section(ms);
> >+	struct page *memmap = NULL;
> >+	int rc;
> >+
> >+
> >+	rc = clear_subsection_map(pfn, nr_pages);
> >+	if(IS_ERR_VALUE((unsigned long)rc))
> >+		return;
> > 	/*
> > 	 * There are 3 cases to handle across two configurations
> > 	 * (SPARSEMEM_VMEMMAP={y,n}):
> >@@ -763,8 +794,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> > 	 *
> > 	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
> > 	 */
> >-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
> >-	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
> >+	if (!rc) {
> > 		unsigned long section_nr = pfn_to_section_nr(pfn);
> > 
> > 		/*
> >-- 
> >2.17.2
> 
> -- 
> Wei Yang
> Help you, Help me
>
Wei Yang Feb. 10, 2020, 6:02 a.m. UTC | #3
On Mon, Feb 10, 2020 at 11:36:27AM +0800, Baoquan He wrote:
>On 02/10/20 at 07:07am, Wei Yang wrote:
>> On Sun, Feb 09, 2020 at 06:48:21PM +0800, Baoquan He wrote:
>> >Wrap the codes clearing subsection map of one memory region in
>> >section_deactivate() into clear_subsection_map().
>> >
>> 
>> Patch 1 and 2 server the same purpose -- to #ifdef the VMEMMAP.
>
>Hmm, I didn't say patch 1 and 2 are preparation works because they had
>better be done even if we don't take off subsection map from
>SPARSEMEM|!VMEMMAP case. Wrapping the subsection map filling and clearing
>codes into separate new functions, can make section_activate() and
>section_deactivate() much clearer on code logic.
>
>If you don't mind, I will keep them for now, and see what other people
>will say.

No objection.

>
>Thanks
>Baoquan
>
>> 
>> >---
>> > mm/sparse.c | 44 +++++++++++++++++++++++++++++++++++++-------
>> > 1 file changed, 37 insertions(+), 7 deletions(-)
>> >
>> >diff --git a/mm/sparse.c b/mm/sparse.c
>> >index 9ad741ccbeb6..696f6b9f706e 100644
>> >--- a/mm/sparse.c
>> >+++ b/mm/sparse.c
>> >@@ -726,14 +726,25 @@ static void free_map_bootmem(struct page *memmap)
>> > }
>> > #endif /* CONFIG_SPARSEMEM_VMEMMAP */
>> > 
>> >-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>> >-		struct vmem_altmap *altmap)
>> >+/**
>> >+ * clear_subsection_map - Clear subsection map of one memory region
>> >+ *
>> >+ * @pfn - start pfn of the memory range
>> >+ * @nr_pages - number of pfns to add in the region
>> >+ *
>> >+ * This is only intended for hotplug, and clear the related subsection
>> >+ * map inside one section.
>> >+ *
>> >+ * Return:
>> >+ * * -EINVAL	- Section already deactived.
>> >+ * * 0		- Subsection map is emptied.
>> >+ * * 1		- Subsection map is not empty.
>> >+ */
>> >+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
>> > {
>> > 	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
>> > 	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
>> > 	struct mem_section *ms = __pfn_to_section(pfn);
>> >-	bool section_is_early = early_section(ms);
>> >-	struct page *memmap = NULL;
>> > 	unsigned long *subsection_map = ms->usage
>> > 		? &ms->usage->subsection_map[0] : NULL;
>> > 
>> >@@ -744,8 +755,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>> > 	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
>> > 				"section already deactivated (%#lx + %ld)\n",
>> > 				pfn, nr_pages))
>> >-		return;
>> >+		return -EINVAL;
>> >+
>> >+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
>> > 
>> >+	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION))
>> >+		return 0;
>> >+
>> >+	return 1;
>> >+}
>> >+
>> >+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>> >+		struct vmem_altmap *altmap)
>> >+{
>> >+	struct mem_section *ms = __pfn_to_section(pfn);
>> >+	bool section_is_early = early_section(ms);
>> >+	struct page *memmap = NULL;
>> >+	int rc;
>> >+
>> >+
>> >+	rc = clear_subsection_map(pfn, nr_pages);
>> >+	if(IS_ERR_VALUE((unsigned long)rc))
>> >+		return;
>> > 	/*
>> > 	 * There are 3 cases to handle across two configurations
>> > 	 * (SPARSEMEM_VMEMMAP={y,n}):
>> >@@ -763,8 +794,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>> > 	 *
>> > 	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
>> > 	 */
>> >-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
>> >-	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
>> >+	if (!rc) {
>> > 		unsigned long section_nr = pfn_to_section_nr(pfn);
>> > 
>> > 		/*
>> >-- 
>> >2.17.2
>> 
>> -- 
>> Wei Yang
>> Help you, Help me
>>
diff mbox series

Patch

diff --git a/mm/sparse.c b/mm/sparse.c
index 9ad741ccbeb6..696f6b9f706e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -726,14 +726,25 @@  static void free_map_bootmem(struct page *memmap)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
-		struct vmem_altmap *altmap)
+/**
+ * clear_subsection_map - Clear subsection map of one memory region
+ *
+ * @pfn - start pfn of the memory range
+ * @nr_pages - number of pfns to remove from the region
+ *
+ * This is only intended for hotplug, and clears the related subsection
+ * map inside one section.
+ *
+ * Return:
+ * * -EINVAL	- Section already deactivated.
+ * * 0		- Subsection map is emptied.
+ * * 1		- Subsection map is not empty.
+ */
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
 	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
 	struct mem_section *ms = __pfn_to_section(pfn);
-	bool section_is_early = early_section(ms);
-	struct page *memmap = NULL;
 	unsigned long *subsection_map = ms->usage
 		? &ms->usage->subsection_map[0] : NULL;
 
@@ -744,8 +755,28 @@  static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
 				"section already deactivated (%#lx + %ld)\n",
 				pfn, nr_pages))
-		return;
+		return -EINVAL;
+
+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
 
+	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION))
+		return 0;
+
+	return 1;
+}
+
+static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+		struct vmem_altmap *altmap)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	bool section_is_early = early_section(ms);
+	struct page *memmap = NULL;
+	int rc;
+
+
+	rc = clear_subsection_map(pfn, nr_pages);
+	if(IS_ERR_VALUE((unsigned long)rc))
+		return;
 	/*
 	 * There are 3 cases to handle across two configurations
 	 * (SPARSEMEM_VMEMMAP={y,n}):
@@ -763,8 +794,7 @@  static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	 *
 	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
 	 */
-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
+	if (!rc) {
 		unsigned long section_nr = pfn_to_section_nr(pfn);
 
 		/*