@@ -1577,6 +1577,20 @@
Documentation/admin-guide/mm/hugetlbpage.rst.
Format: size[KMG]
+ hugetlb_free_vmemmap=
+ [KNL] When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set,
+ this controls freeing unused vmemmap pages associated
+ with each HugeTLB page. When this option is enabled,
+ we disable PMD/huge page mapping of vmemmap pages,
+ which increases the number of page table pages. So if
+ a user/sysadmin only uses a small number of HugeTLB
+ pages (as a percentage of system memory), they could
+ end up using more memory with hugetlb_free_vmemmap on
+ as opposed to off.
+ Format: { on | off (default) }
+
+ on: enable the feature
+ off: disable the feature
+
hung_task_panic=
[KNL] Should the hung task detector generate panics.
Format: 0 | 1
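
To make the documented tradeoff concrete, here is a minimal userspace
sketch (not kernel code; it assumes x86_64 with 4KB base pages, a
64-byte struct page, and the RESERVE_VMEMMAP_NR = 2 reservation used
later in this series) of how much vmemmap one 2MB HugeTLB page can
give back:

#include <stdio.h>

#define BASE_PAGE_SIZE          4096UL
#define HUGE_PAGE_SIZE          (2UL << 20)
#define STRUCT_PAGE_SIZE        64UL    /* assumed; must be a power of 2 */
#define RESERVE_VMEMMAP_NR      2UL     /* vmemmap pages kept per HugeTLB page */

int main(void)
{
        unsigned long nr_struct_pages = HUGE_PAGE_SIZE / BASE_PAGE_SIZE;  /* 512 */
        unsigned long vmemmap_bytes = nr_struct_pages * STRUCT_PAGE_SIZE; /* 32KB */
        unsigned long vmemmap_pages = vmemmap_bytes / BASE_PAGE_SIZE;     /* 8 */
        unsigned long freeable = vmemmap_pages - RESERVE_VMEMMAP_NR;      /* 6 */

        printf("%lu of %lu vmemmap pages (%luKB) freeable per 2MB HugeTLB page\n",
               freeable, vmemmap_pages, freeable * BASE_PAGE_SIZE / 1024);
        return 0;
}

Against that 24KB saving per 2MB page, the cost is the extra PTE pages
needed to map the vmemmap with base pages, which is why the text above
warns that small HugeTLB pools may see a net loss.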
@@ -145,6 +145,9 @@ default_hugepagesz
will all result in 256 2M huge pages being allocated. Valid default
huge page size is architecture dependent.
+hugetlb_free_vmemmap
+ When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this enables freeing
+ unused vmemmap pages associated with each HugeTLB page.
When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
indicates the current number of pre-allocated huge pages of the default size.
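
As a usage sketch (pool sizes are illustrative), the option is passed on
the kernel command line alongside the usual HugeTLB pool parameters:

        hugetlb_free_vmemmap=on hugepagesz=2M hugepages=512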
@@ -34,6 +34,7 @@
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>
+#include <linux/hugetlb.h>
#include <asm/processor.h>
#include <asm/bios_ebda.h>
@@ -1557,7 +1558,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
{
int err;
- if (end - start < PAGES_PER_SECTION * sizeof(struct page))
+ if ((is_hugetlb_free_vmemmap_enabled() && !altmap) ||
+ end - start < PAGES_PER_SECTION * sizeof(struct page))
err = vmemmap_populate_basepages(start, end, node, NULL);
else if (boot_cpu_has(X86_FEATURE_PSE))
err = vmemmap_populate_hugepages(start, end, node, altmap);
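
Why base pages here: freeing part of a HugeTLB page's vmemmap requires
remapping individual 4KB vmemmap pages later, which a 2MB PMD mapping
would not allow; ranges backed by an altmap keep their vmemmap in device
memory and are left alone. A standalone sketch of the decision over the
vmemmap virtual range [start, end), with x86_64 constants assumed (128MB
sections, 4KB base pages, 64-byte struct page, so one section's vmemmap
is 32768 * 64 bytes = 2MB, exactly one PMD):

#include <stdbool.h>

#define PAGES_PER_SECTION       32768UL /* assumed: 128MB section / 4KB pages */
#define STRUCT_PAGE_SIZE        64UL    /* assumed sizeof(struct page) */

static bool use_basepages(unsigned long start, unsigned long end,
                          bool free_vmemmap_on, bool have_altmap)
{
        /* PTE-map the vmemmap whenever its pages must stay individually
         * freeable, or when the range is smaller than one section. */
        return (free_vmemmap_on && !have_altmap) ||
               end - start < PAGES_PER_SECTION * STRUCT_PAGE_SIZE;
}

int main(void)
{
        unsigned long section_vmemmap = PAGES_PER_SECTION * STRUCT_PAGE_SIZE;

        /* Feature off: a whole section's vmemmap gets a PMD mapping. */
        bool a = use_basepages(0, section_vmemmap, false, false);      /* false */
        /* Feature on, no altmap: forced to base pages. */
        bool b = use_basepages(0, section_vmemmap, true, false);       /* true */

        return (a == false && b == true) ? 0 : 1;
}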
@@ -1585,6 +1587,8 @@ void register_page_bootmem_memmap(unsigned long section_nr,
pmd_t *pmd;
unsigned int nr_pmd_pages;
struct page *page;
+ bool base_mapping = !boot_cpu_has(X86_FEATURE_PSE) ||
+ is_hugetlb_free_vmemmap_enabled();
for (; addr < end; addr = next) {
pte_t *pte = NULL;
@@ -1610,7 +1614,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
}
get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
- if (!boot_cpu_has(X86_FEATURE_PSE)) {
+ if (base_mapping) {
next = (addr + PAGE_SIZE) & PAGE_MASK;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
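
The refactor above folds the two reasons for a PTE-mapped vmemmap (no
PSE support, or the feature being enabled) into one base_mapping flag,
so the registration loop steps one 4KB page at a time instead of one
2MB PMD at a time. A minimal sketch of the two step sizes, illustrative
only and with x86_64 values assumed:

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PMD_SIZE        (2UL << 20)
#define PMD_MASK        (~(PMD_SIZE - 1))

/* Illustrative only: next address visited by the walk. */
unsigned long next_step(unsigned long addr, int base_mapping)
{
        if (base_mapping)
                return (addr + PAGE_SIZE) & PAGE_MASK;  /* per-PTE walk */
        return (addr + PMD_SIZE) & PMD_MASK;            /* per-PMD walk */
}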
@@ -854,6 +854,20 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
void set_page_huge_active(struct page *page);
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+extern bool hugetlb_free_vmemmap_enabled;
+
+static inline bool is_hugetlb_free_vmemmap_enabled(void)
+{
+ return hugetlb_free_vmemmap_enabled;
+}
+#else
+static inline bool is_hugetlb_free_vmemmap_enabled(void)
+{
+ return false;
+}
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -1007,6 +1021,11 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
pte_t *ptep, pte_t pte, unsigned long sz)
{
}
+
+static inline bool is_hugetlb_free_vmemmap_enabled(void)
+{
+ return false;
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
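
The paired static inlines above follow the usual config-stub pattern:
when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP (or CONFIG_HUGETLB_PAGE entirely)
is off, the helper is a compile-time false, so callers need no #ifdef
and the compiler drops the guarded code. A hypothetical call site, for
illustration only:

/* Hypothetical caller: with the config off this branch folds away. */
static void report_vmemmap_mode(void)
{
        if (is_hugetlb_free_vmemmap_enabled())
                pr_info("HugeTLB: freeing unused vmemmap pages\n");
}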
@@ -183,6 +183,28 @@
#define RESERVE_VMEMMAP_NR 2U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
+bool hugetlb_free_vmemmap_enabled;
+
+static int __init early_hugetlb_free_vmemmap_param(char *buf)
+{
+ /* We cannot optimize if a "struct page" crosses page boundaries. */
+ if (!is_power_of_2(sizeof(struct page))) {
+ pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
+ return 0;
+ }
+
+ if (!buf)
+ return -EINVAL;
+
+ if (!strcmp(buf, "on"))
+ hugetlb_free_vmemmap_enabled = true;
+ else if (strcmp(buf, "off"))
+ return -EINVAL;
+
+ return 0;
+}
+early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
+
/*
* How many vmemmap pages associated with a HugeTLB page can be freed
* to the buddy allocator.
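
The parser above accepts exactly "on" and "off"; anything else (or a
missing value) is rejected with -EINVAL, while the power-of-2 check
bails out early with only a warning, because the remapping trick cannot
work if one struct page straddles a page boundary. A standalone
userspace sketch of the accepted values (mirrors the string handling
only, outside the kernel):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool enabled;

static int parse(const char *buf)
{
        if (!buf)
                return -EINVAL;
        if (!strcmp(buf, "on"))
                enabled = true;         /* explicit enable */
        else if (strcmp(buf, "off"))
                return -EINVAL;         /* anything but "off" is invalid */
        return 0;
}

int main(void)
{
        int ret;

        ret = parse("off");
        printf("off   -> %d (enabled=%d)\n", ret, enabled);
        ret = parse("on");
        printf("on    -> %d (enabled=%d)\n", ret, enabled);
        ret = parse("bogus");
        printf("bogus -> %d\n", ret);
        return 0;
}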