===================================================================
@@ -1588,6 +1588,8 @@ int vmemmap_populate(struct page *start_
void vmemmap_populate_print_last(void);
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
unsigned long size);
+void vmemmap_kfree(struct page *memmap, unsigned long nr_pages);
+void vmemmap_free_bootmem(struct page *memmap, unsigned long nr_pages);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
===================================================================
@@ -614,12 +614,13 @@ static inline struct page *kmalloc_secti
/* This will make the necessary allocations eventually. */
return sparse_mem_map_populate(pnum, nid);
}
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *page, unsigned long nr_pages)
{
- return; /* XXX: Not implemented yet */
+ vmemmap_kfree(page, nr_pages);
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
+ vmemmap_free_bootmem(page, nr_pages);
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
===================================================================
@@ -978,6 +978,97 @@ vmemmap_populate(struct page *start_page
return 0;
}
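+/*
+ * Walk the kernel page tables for @addr.  If the vmemmap chunk starting at
+ * @addr is mapped, return its backing page via @pp and clear the mapping:
+ * the PTE for 4K mappings, or the whole PMD when PSE (2M) mappings are used.
+ * Returns the address at which the caller should continue the walk.
+ */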
+unsigned long find_and_clear_pte_page(unsigned long addr, unsigned long end,
+ struct page **pp)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long next;
+
+ *pp = NULL;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ return (addr + PAGE_SIZE) & PAGE_MASK;
+
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ return (addr + PAGE_SIZE) & PAGE_MASK;
+
+ if (!cpu_has_pse) {
+ next = (addr + PAGE_SIZE) & PAGE_MASK;
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return next;
+
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte))
+ return next;
+
+ *pp = pte_page(*pte);
+ pte_clear(&init_mm, addr, pte);
+ } else {
+ next = pmd_addr_end(addr, end);
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return next;
+
+ *pp = pmd_page(*pmd);
+ pmd_clear(pmd);
+ }
+
+ return next;
+}
+
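+/*
+ * Free the virtually mapped memmap for @nr_pages struct pages starting at
+ * @memmap: unmap each chunk and release its backing pages, using vfree()
+ * for vmalloc'ed chunks and free_pages() otherwise.
+ */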
+void __meminit
+vmemmap_kfree(struct page *memmap, unsigned long nr_pages)
+{
+ unsigned long addr = (unsigned long)memmap;
+ unsigned long end = (unsigned long)(memmap + nr_pages);
+ unsigned long next;
+ unsigned int order;
+ struct page *page;
+
+ for (; addr < end; addr = next) {
+ page = NULL;
+ next = find_and_clear_pte_page(addr, end, &page);
+ if (!page)
+ continue;
+
+ if (is_vmalloc_addr(page_address(page)))
+ vfree(page_address(page));
+ else {
+ order = next - addr;
+ free_pages((unsigned long)page_address(page),
+ get_order(order));
+ }
+ }
+}
+
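+/*
+ * Unmap the memmap for @nr_pages struct pages starting at @memmap when it
+ * was allocated from bootmem.  Only pages whose bootmem type is SECTION_INFO
+ * are released via put_page_bootmem(); MIX_SECTION_INFO pages are left
+ * untouched for now.
+ */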
+void __meminit
+vmemmap_free_bootmem(struct page *memmap, unsigned long nr_pages)
+{
+ unsigned long addr = (unsigned long)memmap;
+ unsigned long end = (unsigned long)(memmap + nr_pages);
+ unsigned long next;
+ struct page *page;
+ unsigned long magic;
+
+ for (; addr < end; addr = next) {
+ page = NULL;
+ next = find_and_clear_pte_page(addr, end, &page);
+ if (!page)
+ continue;
+
+ magic = (unsigned long) page->lru.next;
+ if (magic == SECTION_INFO)
+ put_page_bootmem(page);
+ }
+}
+
void __meminit
register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page,
unsigned long size)
===================================================================
@@ -303,6 +303,8 @@ static int __meminit __add_section(int n
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
+ unsigned long flags;
+ struct pglist_data *pgdat = zone->zone_pgdat;
int ret;
if (!valid_section(ms))
@@ -310,6 +312,9 @@ static int __remove_section(struct zone
ret = unregister_memory_section(ms);
+ pgdat_resize_lock(pgdat, &flags);
+ sparse_remove_one_section(zone, ms);
+ pgdat_resize_unlock(pgdat, &flags);
return ret;
}
#else
I don't think that all pages of the virtual mapping in removed memory can be
freed, since pages whose type is MIX_SECTION_INFO are difficult to free. So,
for now, the patch only frees pages whose type is SECTION_INFO.

CC: David Rientjes <rientjes@google.com>
CC: Jiang Liu <liuj97@gmail.com>
CC: Len Brown <len.brown@intel.com>
CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: Paul Mackerras <paulus@samba.org>
CC: Christoph Lameter <cl@linux.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
CC: Andrew Morton <akpm@linux-foundation.org>
CC: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
---
 arch/x86/mm/init_64.c |   91 ++++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/mm.h    |    2 +
 mm/memory_hotplug.c   |    5 ++
 mm/sparse.c           |    5 +-
 4 files changed, 101 insertions(+), 2 deletions(-)
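For reference, the SECTION_INFO check in vmemmap_free_bootmem() relies on the
bootmem bookkeeping done when the section's memmap is registered: the bootmem
type is stashed in page->lru.next and a reference is taken, which is what
put_page_bootmem() later drops. The snippet below is only an illustrative
sketch of that convention (roughly what get_page_bootmem() in
mm/memory_hotplug.c does in the kernels this patch targets), not part of the
patch itself:

	static void get_page_bootmem(unsigned long info, struct page *page,
				     unsigned long type)
	{
		/* type is e.g. SECTION_INFO or MIX_SECTION_INFO */
		page->lru.next = (struct list_head *)type;
		SetPagePrivate(page);
		/* info records the section number */
		set_page_private(page, info);
		/* dropped again by put_page_bootmem() */
		atomic_inc(&page->_count);
	}

MIX_SECTION_INFO pages are not handled here because, as noted above, they are
difficult to free safely.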