@@ -425,5 +425,6 @@ void update_mmu_tlb_range(struct vm_area_struct *vma,
* SHM area cache aliasing for userland.
*/
#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_MMAP_HINT
#endif /* _XTENSA_PGTABLE_H */
@@ -54,6 +54,24 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
}
#ifdef CONFIG_MMU
+unsigned long arch_mmap_hint(struct file *filp, unsigned long addr,
+			     unsigned long len, unsigned long pgoff,
+			     unsigned long flags)
+{
+	if (!addr)
+		return 0;
+
+	if (len > TASK_SIZE)
+		return 0;
+
+	if (flags & MAP_SHARED)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	return generic_mmap_hint(filp, addr, len, pgoff, flags);
+}
+
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags)
@@ -73,8 +91,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (len > TASK_SIZE)
return -ENOMEM;
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
+
+	addr = arch_mmap_hint(filp, addr, len, pgoff, flags);
+	if (addr)
+		return addr;
+
+	addr = TASK_UNMAPPED_BASE;
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr, pgoff);
Introduce xtensa arch_mmap_hint() and define HAVE_ARCH_MMAP_HINT. If a
sufficiently sized hole doesn't exist at the hint address, fall back to
searching the entire valid VA space instead of only the VA space above
the hint address.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/xtensa/include/asm/pgtable.h |  1 +
 arch/xtensa/kernel/syscall.c      | 26 ++++++++++++++++++++++++--
 2 files changed, 25 insertions(+), 2 deletions(-)
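
For readers of the series, below is a minimal sketch of the hint-validation
semantics this patch relies on: the aligned hint is accepted only when it fits
inside the task's VA space and does not intersect an existing VMA; otherwise 0
is returned so arch_get_unmapped_area() falls back to a full search starting
from TASK_UNMAPPED_BASE. This is an assumption about what generic_mmap_hint()
does, not its actual implementation, and the helper name sketch_mmap_hint() is
hypothetical.

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>

/*
 * Hypothetical sketch of the assumed generic_mmap_hint() semantics.
 * Assumes the arch wrapper has already rejected len > TASK_SIZE and that
 * the caller holds mmap_lock, as get_unmapped_area() callers do.
 */
static unsigned long sketch_mmap_hint(unsigned long addr, unsigned long len,
				      unsigned long flags)
{
	struct mm_struct *mm = current->mm;

	/* A MAP_FIXED request is not a hint; use the address as given. */
	if (flags & MAP_FIXED)
		return addr;

	/* The hinted range must fit below TASK_SIZE. */
	if (addr > TASK_SIZE - len)
		return 0;

	/* Accept the hint only if [addr, addr + len) is an unmapped hole. */
	if (find_vma_intersection(mm, addr, addr + len))
		return 0;

	return addr;
}

Under that assumption, a zero return from arch_mmap_hint() makes
arch_get_unmapped_area() start its search at TASK_UNMAPPED_BASE, so the whole
valid VA range is considered rather than only the space above the rejected
hint.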