diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -42,6 +42,7 @@ struct vm_struct {
unsigned long size;
unsigned long flags;
struct page **pages;
+ unsigned int page_shift;
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -36,6 +36,7 @@
#include <linux/rbtree_augmented.h>
#include <linux/uaccess.h>
+#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
@@ -439,6 +440,41 @@ static int vmap_pages_range(unsigned long start, unsigned long end,
return ret;
}
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
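+/*
+ * Map each entry of pages[] (an order-page_shift allocation) with a
+ * single huge mapping. Returns the number of huge pages mapped, or a
+ * negative errno on failure.
+ */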
+static int vmap_hpages_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
+ unsigned long addr = start;
+ unsigned int i, nr = (end - start) >> (PAGE_SHIFT + page_shift);
+
+ for (i = 0; i < nr; i++) {
+ int err;
+
+ err = vmap_range_noflush(addr,
+ addr + (PAGE_SIZE << page_shift),
+ __pa(page_address(pages[i])), prot,
+ PAGE_SHIFT + page_shift);
+ if (err)
+ return err;
+
+ addr += PAGE_SIZE << page_shift;
+ }
+ flush_cache_vmap(start, end);
+
+ return nr;
+}
+#else
+static int vmap_hpages_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
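+ /* Without huge vmap support, only order-0 mappings are valid here */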
+ BUG_ON(page_shift != 0);
+ return vmap_pages_range(start, end, prot, pages);
+}
+#endif
+
int is_vmalloc_or_module_addr(const void *x)
{
/*
@@ -461,7 +497,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
{
unsigned long addr = (unsigned long) vmalloc_addr;
struct page *page = NULL;
- pgd_t *pgd = pgd_offset_k(addr);
+ pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -473,27 +509,38 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
*/
VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
+ pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
return NULL;
+
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
return NULL;
- pud = pud_offset(p4d, addr);
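+ /*
+ * With huge vmap, a leaf entry can appear at the p4d, pud or pmd
+ * level; the offset below that level's granularity selects the
+ * individual struct page within the huge mapping.
+ */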
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ if (p4d_large(*p4d))
+ return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
+#endif
+ if (WARN_ON_ONCE(p4d_bad(*p4d)))
+ return NULL;
- /*
- * Don't dereference bad PUD or PMD (below) entries. This will also
- * identify huge mappings, which we may encounter on architectures
- * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
- * identified as vmalloc addresses by is_vmalloc_addr(), but are
- * not [unambiguously] associated with a struct page, so there is
- * no correct value to return for them.
- */
- WARN_ON_ONCE(pud_bad(*pud));
- if (pud_none(*pud) || pud_bad(*pud))
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud))
+ return NULL;
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ if (pud_large(*pud))
+ return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+#endif
+ if (WARN_ON_ONCE(pud_bad(*pud)))
return NULL;
+
pmd = pmd_offset(pud, addr);
- WARN_ON_ONCE(pmd_bad(*pmd));
- if (pmd_none(*pmd) || pmd_bad(*pmd))
+ if (pmd_none(*pmd))
+ return NULL;
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ if (pmd_large(*pmd))
+ return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+#endif
+ if (WARN_ON_ONCE(pmd_bad(*pmd)))
return NULL;
ptep = pte_offset_map(pmd, addr);
@@ -501,6 +548,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
+
return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
@@ -2184,8 +2232,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
if (flags & VM_IOREMAP)
- align = 1ul << clamp_t(int, get_count_order_long(size),
- PAGE_SHIFT, IOREMAP_MAX_ORDER);
+ align = max(align,
+ 1ul << clamp_t(int, get_count_order_long(size),
+ PAGE_SHIFT, IOREMAP_MAX_ORDER));
area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!area))
@@ -2397,7 +2446,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
struct page *page = area->pages[i];
BUG_ON(!page);
- __free_pages(page, 0);
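+ /* Free at the same order the pages were allocated with */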
+ __free_pages(page, area->page_shift);
}
kvfree(area->pages);
@@ -2540,14 +2589,17 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node)
{
struct page **pages;
+ unsigned long addr = (unsigned long)area->addr;
+ unsigned long size = get_vm_area_size(area);
+ unsigned int page_shift = area->page_shift;
+ unsigned int shift = page_shift + PAGE_SHIFT;
unsigned int nr_pages, array_size, i;
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
- 0 :
- __GFP_HIGHMEM;
+ 0 : __GFP_HIGHMEM;
- nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
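+ /* nr_pages counts allocation units of 1 << shift bytes, not base pages */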
+ nr_pages = size >> shift;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
@@ -2568,10 +2620,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
- if (node == NUMA_NO_NODE)
- page = alloc_page(alloc_mask|highmem_mask);
- else
- page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
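+ /* alloc_pages_node() uses the local node when passed NUMA_NO_NODE */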
+ page = alloc_pages_node(node,
+ alloc_mask|highmem_mask, page_shift);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
@@ -2583,8 +2633,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
cond_resched();
}
- if (map_vm_area(area, prot, pages))
+ if (vmap_hpages_range(addr, addr + size, prot, pages, page_shift) < 0)
goto fail;
+
return area->addr;
fail:
@@ -2618,22 +2669,39 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller)
{
- struct vm_struct *area;
+ struct vm_struct *area = NULL;
void *addr;
unsigned long real_size = size;
+ unsigned long real_align = align;
+ unsigned int shift = PAGE_SHIFT;
size = PAGE_ALIGN(size);
if (!size || (size >> PAGE_SHIFT) > totalram_pages())
goto fail;
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) {
+ unsigned long size_per_node;
+
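+ /*
+ * Only attempt PMD mappings when each node's share of the
+ * allocation is at least PMD_SIZE.
+ */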
+ size_per_node = size;
+ if (node == NUMA_NO_NODE)
+ size_per_node /= num_online_nodes();
+ if (size_per_node >= PMD_SIZE)
+ shift = PMD_SHIFT;
+ }
+again:
+ align = max(real_align, 1UL << shift);
+ size = ALIGN(real_size, align);
+
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
vm_flags, start, end, node, gfp_mask, caller);
if (!area)
goto fail;
+ area->page_shift = shift - PAGE_SHIFT;
+
addr = __vmalloc_area_node(area, gfp_mask, prot, node);
if (!addr)
- return NULL;
+ goto fail;
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -2647,8 +2715,16 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return addr;
fail:
- warn_alloc(gfp_mask, NULL,
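+ /* Huge page allocation failed: fall back to small pages and retry */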
+ if (shift == PMD_SHIFT) {
+ shift = PAGE_SHIFT;
+ goto again;
+ }
+
+ if (!area) {
+ /* Warn for area allocation, page allocations already warn */
+ warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure: %lu bytes", real_size);
+ }
return NULL;
}
For platforms that define HAVE_ARCH_HUGE_VMAP, have vmalloc try to
allocate and map PMD-level huge pages when the size is appropriate, and
fall back to small pages if unsuccessful.

Using 2MB page mappings in an 8MB dentry cache hash brings dTLB misses
for a linux kernel tree `git diff` from 45,000 down to 8,000 on a Kaby
Lake CPU with mitigations=off (performance is in the noise, under 1%
difference; page tables are likely to be well cached for this workload).

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 include/linux/vmalloc.h |   1 +
 mm/vmalloc.c            | 132 +++++++++++++++++++++++++++++++---------
 2 files changed, 105 insertions(+), 28 deletions(-)
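As an aside for readers: the allocation strategy above (attempt a huge
mapping first, fall back to base pages on failure) has a rough userspace
analogue in mmap(2) with MAP_HUGETLB. The following sketch is purely
illustrative and is not part of the patch; it assumes a Linux system
with hugetlb pages reserved by the administrator (e.g. via
/proc/sys/vm/nr_hugepages).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t size = 8UL << 20;	/* 8MB, a multiple of PMD_SIZE on x86-64 */
	void *p;

	/* First attempt: back the mapping with 2MB huge pages */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		/* Fallback, analogous to the patch's "goto again" with PAGE_SHIFT */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		printf("mapped %zu bytes with base pages\n", size);
	} else {
		printf("mapped %zu bytes with huge pages\n", size);
	}
	munmap(p, size);
	return 0;
}

As in the patch, the caller never sees which page size backs the
mapping; the fallback only affects TLB behaviour, not correctness.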