--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -210,6 +210,9 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
const void *caller);
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long align,
+ unsigned long flags, int node, gfp_t gfp,
+ const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
@@ -241,10 +244,22 @@ static inline void set_vm_flush_reset_perms(void *addr)
vm->flags |= VM_FLUSH_RESET_PERMS;
}
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
+
+static inline
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
+ return -EINVAL;
+}
#endif
/* for /proc/kcore */
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -969,19 +969,10 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
*/
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
-int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
- pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}
-
-static inline
-int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
- pgprot_t prot, struct page **pages, unsigned int page_shift)
-{
- return -EINVAL;
-}
#endif
int __must_check __vmap_pages_range_noflush(unsigned long addr,
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2650,6 +2650,30 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
NUMA_NO_NODE, GFP_KERNEL, caller);
}
+/**
+ * get_vm_area_node - reserve a contiguous and aligned kernel virtual area
+ * @size: size of the area
+ * @align: alignment of the start address of the area
+ * @flags: %VM_IOREMAP for I/O mappings
+ * @node: NUMA node from which to allocate the area data structure
+ * @gfp: Flags to pass to the allocator
+ * @caller: Caller to be stored in the vm area data structure
+ *
+ * Search for an area of @size/@align in the kernel virtual mapping
+ * area, and reserve it for our purposes. Returns the area descriptor
+ * on success or %NULL on failure.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
+ */
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long align,
+ unsigned long flags, int node, gfp_t gfp,
+ const void *caller)
+{
+ return __get_vm_area_node(size, align, PAGE_SHIFT, flags,
+ VMALLOC_START, VMALLOC_END,
+ node, gfp, caller);
+}
+
/**
* find_vm_area - find a continuous kernel virtual area
* @addr: base address
get_vm_area_node(): unlike the other public get_vm_area_* variants, this
one accepts the node from which to allocate the area data structure, as
well as an align argument, which allows creating a vm area with a
specific alignment. This call is going to be used by dynamic stacks in
order to ensure that the stack VM area has a specific alignment, so that
even if only one page is mapped, no page table allocations are needed to
map the other stack pages.

vmap_pages_range_noflush() is already a global function, but it was
declared in mm/internal.h. Since we will need it from kernel/fork.c in
order to map the initial stack pages, move its forward declaration to
the linux/vmalloc.h header.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 include/linux/vmalloc.h | 15 +++++++++++++++
 mm/internal.h           |  9 ---------
 mm/vmalloc.c            | 24 ++++++++++++++++++++++++
 3 files changed, 39 insertions(+), 9 deletions(-)
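
For illustration, here is a minimal sketch (not part of this patch) of
how a caller such as kernel/fork.c might combine the two interfaces:
reserve a THREAD_SIZE-sized, THREAD_ALIGN-aligned area and back only its
topmost page. The helper name, the VM_MAP flag choice, and the top-page
layout are assumptions made for this sketch:

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/thread_info.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical example: mapping the first page populates the page
 * tables down to the PTE level, and the THREAD_ALIGN alignment
 * guarantees that the remaining stack pages fall under those same
 * tables, so they can later be mapped without new allocations.
 */
static void *dynamic_stack_sketch(int node)
{
	struct vm_struct *vm;
	struct page *page;
	unsigned long addr;

	vm = get_vm_area_node(THREAD_SIZE, THREAD_ALIGN, VM_MAP, node,
			      GFP_KERNEL, __builtin_return_address(0));
	if (!vm)
		return NULL;

	/* Back only the top page; the stack grows down toward it. */
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		goto free_area;

	addr = (unsigned long)vm->addr + THREAD_SIZE - PAGE_SIZE;
	if (vmap_pages_range_noflush(addr, addr + PAGE_SIZE, PAGE_KERNEL,
				     &page, PAGE_SHIFT))
		goto free_page;

	/* The _noflush variant leaves cache flushing to the caller. */
	flush_cache_vmap(addr, addr + PAGE_SIZE);
	return vm->addr;

free_page:
	__free_page(page);
free_area:
	free_vm_area(vm);
	return NULL;
}

Using the _noflush variant here keeps the flush under the caller's
control, which matters once pages are mapped one at a time from the
stack-fault path rather than all at once.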