@@ -308,6 +308,10 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern int is_vmalloc_addr(const void *x);
+#else
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
@@ -318,6 +322,8 @@ static inline int is_vmalloc_addr(const void *x)
return 0;
#endif
}
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
@@ -16,6 +16,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
+#define VM_LOWMEM 0x00000040 /* Tracking of direct mapped lowmem */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -519,3 +519,14 @@ config MEM_SOFT_DIRTY
it can be cleared by hands.
See Documentation/vm/soft-dirty.txt for more details.
+
+config ENABLE_VMALLOC_SAVING
+ bool "Intermix lowmem and vmalloc virtual space"
+ depends on ARCH_TRACKS_VMALLOC
+ help
+ Some memory layouts on embedded systems steal large amounts
+ of lowmem physical memory for purposes outside of the kernel.
+ Rather than waste the physical and virtual space, allow the
+ kernel to use the virtual space as vmalloc space.
+
+ If unsure, say N.
@@ -282,6 +282,38 @@ static unsigned long cached_align;
static unsigned long vmap_area_pcpu_hole;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+int is_vmalloc_addr(const void *x)
+{
+ struct vmap_area *va;
+ int ret = 0;
+
+ spin_lock(&vmap_area_lock);
+ list_for_each_entry(va, &vmap_area_list, list) {
+ if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+ continue;
+
+ if (!(va->flags & VM_VM_AREA))
+ continue;
+
+ if (va->vm == NULL)
+ continue;
+
+ if (va->vm->flags & VM_LOWMEM)
+ continue;
+
+ if ((unsigned long)x >= va->va_start &&
+ (unsigned long)x < va->va_end) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock(&vmap_area_lock);
+ return ret;
+}
+EXPORT_SYMBOL(is_vmalloc_addr);
+#endif
+
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
@@ -2628,6 +2660,9 @@ static int s_show(struct seq_file *m, void *p)
if (v->flags & VM_VPAGES)
seq_printf(m, " vpages");
+ if (v->flags & VM_LOWMEM)
+ seq_printf(m, " lowmem");
+
show_numa_info(m, v);
seq_putc(m, '\n');
return 0;