--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -437,6 +437,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_MMAP_HINT
 #define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -40,6 +40,19 @@ SYSCALL_DEFINE0(getpagesize)
 	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
 }
 
+unsigned long arch_mmap_hint(struct file *filp, unsigned long addr,
+			     unsigned long len, unsigned long pgoff,
+			     unsigned long flags)
+{
+	if (!addr)
+		return 0;
+
+	if (len > TASK_SIZE - PAGE_SIZE)
+		return 0;
+
+	return generic_mmap_hint(filp, addr, len, pgoff, flags);
+}
+
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
 {
 	struct vm_unmapped_area_info info = {};
@@ -61,11 +74,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	/* See asm-sparc/uaccess.h */
 	if (len > TASK_SIZE - PAGE_SIZE)
 		return -ENOMEM;
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
+
+	addr = arch_mmap_hint(filp, addr, len, pgoff, flags);
+	if (addr)
+		return addr;
 
 	info.length = len;
-	info.low_limit = addr;
+	info.low_limit = TASK_UNMAPPED_BASE;
 	info.high_limit = TASK_SIZE;
 	if (!file_hugepage) {
 		info.align_mask = (flags & MAP_SHARED) ?
Introduce sparc32 arch_mmap_hint() and define HAVE_ARCH_MMAP_HINT. If a
sufficiently sized hole doesn't exist at the hint address, fall back to
searching the entire valid VA space instead of only the VA space above
the hint address.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/sparc/include/asm/pgtable_32.h |  1 +
 arch/sparc/kernel/sys_sparc_32.c    | 21 ++++++++++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)
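
Note for reviewers: the placement policy above can be modeled with a small,
self-contained userspace sketch. Everything in it (the hole table, hole_fits()
and place()) is a hypothetical illustration rather than kernel code; it only
mirrors the behavior described in the commit message: honor the hint when a
sufficiently sized hole exists at that address, otherwise search the entire
valid range instead of only the space above the hint.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of a VA space: a handful of free holes, each [start, start + len). */
struct hole {
	unsigned long start;
	unsigned long len;
};

static const struct hole holes[] = {
	{ 0x10000000UL, 0x00200000UL },	/* 2 MiB hole */
	{ 0x40000000UL, 0x04000000UL },	/* 64 MiB hole */
};

/* Hypothetical helper: does some free hole fully contain [addr, addr + len)? */
static bool hole_fits(unsigned long addr, unsigned long len)
{
	for (size_t i = 0; i < sizeof(holes) / sizeof(holes[0]); i++) {
		if (addr >= holes[i].start &&
		    addr + len <= holes[i].start + holes[i].len)
			return true;
	}
	return false;
}

/*
 * Hint-first placement, mirroring the split introduced by the patch:
 * step 1 corresponds to arch_mmap_hint(), step 2 to the fallback search
 * over the whole range (low_limit no longer starts at the hint).
 */
static unsigned long place(unsigned long hint, unsigned long len)
{
	/* Step 1: honor the hint if a sufficiently sized hole exists there. */
	if (hint && hole_fits(hint, len))
		return hint;

	/* Step 2: otherwise search the entire range from the bottom up. */
	for (size_t i = 0; i < sizeof(holes) / sizeof(holes[0]); i++) {
		if (holes[i].len >= len)
			return holes[i].start;
	}
	return 0;	/* no space anywhere */
}

int main(void)
{
	/* Hint lies inside a large-enough hole: it is honored as-is. */
	printf("%#lx\n", place(0x10010000UL, 0x1000UL));	/* 0x10010000 */

	/* Hint does not fit: fall back to scanning every hole, including
	 * ones below the hint, instead of only the space above it. */
	printf("%#lx\n", place(0x50000000UL, 0x1000000UL));	/* 0x40000000 */
	return 0;
}

The second call shows why the fallback matters: once the hint cannot be
honored, the search may legitimately land below the hint, which the old
"info.low_limit = addr" behavior ruled out.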