@@ -12,6 +12,14 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
{
return pmd_large(*pmd);
}
+
+#define vmemmap_pmd_mkhuge vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+ pte_t entry = pfn_pte(page_to_pfn(page), PAGE_KERNEL_LARGE);
+
+ return __pmd(pte_val(entry));
+}
#endif
#define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
@@ -605,6 +605,14 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
}
#endif
+#ifndef vmemmap_pmd_mkhuge
+#define vmemmap_pmd_mkhuge vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+ return pmd_mkhuge(mk_pmd(page, PAGE_KERNEL));
+}
+#endif
+
#ifndef VMEMMAP_HPAGE_SHIFT
#define VMEMMAP_HPAGE_SHIFT HPAGE_SHIFT
#endif
@@ -1708,6 +1708,63 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
}
}
+static void __replace_huge_page_pte_vmemmap(pte_t *ptep, unsigned long start,
+ unsigned int nr, struct page *huge,
+ struct list_head *free_pages)
+{
+ unsigned long addr;
+ unsigned long end = start + (nr << PAGE_SHIFT);
+ pgprot_t pgprot = PAGE_KERNEL;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
+ struct page *page;
+ pte_t old = *ptep;
+ pte_t entry;
+
+ prepare_vmemmap_page(huge);
+
+ entry = mk_pte(huge++, pgprot);
+ VM_WARN_ON(!pte_present(old));
+ page = pte_page(old);
+ list_add(&page->lru, free_pages);
+
+ set_pte_at(&init_mm, addr, ptep, entry);
+ }
+}
+
+static void replace_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+ struct page *huge,
+ struct list_head *free_pages)
+{
+ unsigned long end = start + VMEMMAP_HPAGE_SIZE;
+
+ flush_cache_vunmap(start, end);
+ __replace_huge_page_pte_vmemmap(pte_offset_kernel(pmd, start), start,
+ VMEMMAP_HPAGE_NR, huge, free_pages);
+ flush_tlb_kernel_range(start, end);
+}
+
+static pte_t *merge_vmemmap_pte(pmd_t *pmdp, unsigned long addr)
+{
+ pte_t *pte;
+ struct page *page;
+
+ pte = pte_offset_kernel(pmdp, addr);
+ page = pte_page(*pte);
+ set_pmd(pmdp, vmemmap_pmd_mkhuge(page));
+
+ return pte;
+}
+
+static void merge_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+ struct page *huge,
+ struct list_head *free_pages)
+{
+ replace_huge_page_pmd_vmemmap(pmd, start, huge, free_pages);
+ pte_free_kernel(&init_mm, merge_vmemmap_pte(pmd, start));
+ flush_tlb_kernel_range(start, start + VMEMMAP_HPAGE_SIZE);
+}
+
static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
{
int i;
@@ -1721,6 +1778,15 @@ static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
}
}
+static inline void dissolve_compound_page(struct page *page, unsigned int order)
+{
+ int i;
+ unsigned int nr_pages = 1 << order;
+
+ for (i = 1; i < nr_pages; i++)
+ set_page_refcounted(page + i);
+}
+
static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
{
pmd_t *pmd;
@@ -1738,10 +1804,48 @@ static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
__remap_huge_page_pte_vmemmap);
if (!freed_vmemmap_hpage_dec(pmd_page(*pmd)) && pmd_split(pmd)) {
/*
- * Todo:
- * Merge pte to huge pmd if it has ever been split.
+ * Merge PTEs back into a huge PMD if it has ever been split. For
+ * now, only gigantic pages whose vmemmap size is an integer
+ * multiple of PMD_SIZE are supported. This is the simplest case
+ * to handle.
*/
clear_pmd_split(pmd);
+
+ if (IS_ALIGNED(vmemmap_pages_per_hpage(h), VMEMMAP_HPAGE_NR)) {
+ unsigned long addr = (unsigned long)head;
+ unsigned long end = addr +
+ vmemmap_pages_size_per_hpage(h);
+
+ spin_unlock(ptl);
+
+ for (; addr < end; addr += VMEMMAP_HPAGE_SIZE) {
+ void *to;
+ struct page *page;
+
+ page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
+ VMEMMAP_HPAGE_ORDER);
+ if (!page)
+ goto out;
+
+ dissolve_compound_page(page,
+ VMEMMAP_HPAGE_ORDER);
+ to = page_to_virt(page);
+ memcpy(to, (void *)addr, VMEMMAP_HPAGE_SIZE);
+
+ /*
+ * Make sure that any data written to @to is
+ * made visible to the physical page.
+ */
+ flush_kernel_vmap_range(to, VMEMMAP_HPAGE_SIZE);
+
+ merge_huge_page_pmd_vmemmap(pmd++, addr, page,
+ &remap_pages);
+ }
+
+out:
+ free_vmemmap_page_list(&remap_pages);
+ return;
+ }
}
spin_unlock(ptl);
}
Merge PTEs back into a huge PMD if it has ever been split. For now, only
gigantic pages whose vmemmap size is an integer multiple of PMD_SIZE are
supported. This is the simplest case to handle.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 arch/x86/include/asm/hugetlb.h |   8 +++
 include/linux/hugetlb.h        |   8 +++
 mm/hugetlb.c                   | 108 ++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 122 insertions(+), 2 deletions(-)
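
Note (illustration only, not part of the patch): the merge path is gated by
IS_ALIGNED(vmemmap_pages_per_hpage(h), VMEMMAP_HPAGE_NR). The minimal
userspace sketch below walks through that sizing arithmetic. It assumes
common x86-64 values (4 KiB base pages, a 64-byte struct page, and 2 MiB
PMD mappings); other configurations may use different numbers.

/*
 * Sketch of why only gigantic pages take the PMD-merge path: their
 * vmemmap spans a whole number of PMD mappings, while a 2 MiB
 * hugepage's vmemmap is far smaller than one PMD mapping.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE        4096UL
#define STRUCT_PAGE_SIZE 64UL                        /* typical sizeof(struct page) */
#define PMD_MAP_SIZE     (2UL << 20)                 /* stands in for VMEMMAP_HPAGE_SIZE */
#define PMD_MAP_NR       (PMD_MAP_SIZE / PAGE_SIZE)  /* stands in for VMEMMAP_HPAGE_NR */

static void check(const char *name, unsigned long hpage_size)
{
	unsigned long base_pages    = hpage_size / PAGE_SIZE;
	unsigned long vmemmap_size  = base_pages * STRUCT_PAGE_SIZE;
	unsigned long vmemmap_pages = vmemmap_size / PAGE_SIZE;
	/* mirrors IS_ALIGNED(vmemmap_pages_per_hpage(h), VMEMMAP_HPAGE_NR) */
	bool mergeable = (vmemmap_pages % PMD_MAP_NR) == 0;

	printf("%-8s vmemmap = %6lu KiB (%4lu pages) -> %s\n",
	       name, vmemmap_size >> 10, vmemmap_pages,
	       mergeable ? "PMD-mergeable" : "left as PTE mappings");
}

int main(void)
{
	check("2MiB", 2UL << 20);  /*   512 struct pages =  32 KiB, not PMD-aligned */
	check("1GiB", 1UL << 30);  /* 256Ki struct pages =  16 MiB = 8 * 2 MiB      */
	return 0;
}

With these assumed values, a 1 GiB hugepage has a 16 MiB vmemmap (exactly
eight 2 MiB PMD mappings), so the merge loop above runs once per PMD,
whereas a 2 MiB hugepage's 32 KiB vmemmap never qualifies.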