@@ -54,12 +54,12 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 }
 
 #ifdef CONFIG_MMU
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags,
-		vm_flags_t vm_flags)
+unsigned long arch_mmap_hint(struct file *filp, unsigned long addr,
+			     unsigned long len, unsigned long pgoff,
+			     unsigned long flags)
 {
-	struct vm_area_struct *vmm;
-	struct vma_iterator vmi;
+	if (len > TASK_SIZE)
+		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -71,16 +71,27 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
-
 	if (flags & MAP_SHARED)
 		addr = COLOUR_ALIGN(addr, pgoff);
 	else
 		addr = PAGE_ALIGN(addr);
 
+	return generic_mmap_hint(filp, addr, len, pgoff, flags);
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags,
+		vm_flags_t vm_flags)
+{
+	struct vm_area_struct *vmm;
+	struct vma_iterator vmi;
+
+	addr = arch_mmap_hint(filp, addr, len, pgoff, flags);
+	if (addr)
+		return addr;
+
+	addr = TASK_UNMAPPED_BASE;
+
 	vma_iter_init(&vmi, current->mm, addr);
 	for_each_vma(vmi, vmm) {
 		/* At this point:  (addr < vmm->vm_end). */
Introduce xtensa arch_mmap_hint() and define HAVE_ARCH_MMAP_HINT. If a
sufficiently sized hole doesn't exist at the hint address, fall back to
searching the entire valid VA space instead of only the VA space above
the hint address.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---

Changes in v2:
  - MAP_FIXED case is also handled in arch_mmap_hint() since this is
    just a special case of the hint addr being "enforced", per Yang Shi.
  - Consolidate error handling in arch_mmap_hint().

 arch/xtensa/kernel/syscall.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
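A note on the contract the new caller relies on, as visible in the diff
above: arch_mmap_hint() either returns a final address (the validated
hint, the enforced MAP_FIXED address, or a negative errno encoded in an
unsigned long) or returns 0 to mean "no usable hint, do a full search".
A minimal sketch of that contract from the caller's side follows;
sketch_get_unmapped_area() and sketch_full_search() are invented names
for illustration only, not part of this patch or the series:

	/*
	 * Illustration only -- shows why the bare "if (addr)" test in
	 * the new arch_get_unmapped_area() is sufficient.
	 */
	static unsigned long sketch_get_unmapped_area(struct file *filp,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
	{
		addr = arch_mmap_hint(filp, addr, len, pgoff, flags);
		/*
		 * Nonzero is always final: a hole exists at the aligned
		 * hint, or MAP_FIXED enforces the address, or an error
		 * such as -ENOMEM (a large unsigned value here) is
		 * propagated to the caller unchanged.
		 */
		if (addr)
			return addr;
		/*
		 * Zero means no usable hint: restart the search from
		 * TASK_UNMAPPED_BASE so the entire valid VA space is
		 * considered, not just the range above the hint.
		 */
		return sketch_full_search(TASK_UNMAPPED_BASE, len);
	}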