@@ -201,6 +201,10 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
unsigned long flags,
vm_flags_t vm_flags);

+unsigned long generic_mmap_hint(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
@@ -620,6 +620,47 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
return addr;
}

+/*
+ * Look up unmapped area at the requested hint addr
+ *
+ * NOTE: MAP_FIXED is also handled here since it's a special case of
+ * enforcing the hint address.
+ *
+ * Returns:
+ * ERR_VALUE: If the requested mapping is not valid
+ * 0: If there isn't a sufficiently large hole at the hint addr.
+ * addr: If sufficient VA space is available at the hint address;
+ * or MAP_FIXED was specified.
+ */
+unsigned long generic_mmap_hint(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma, *prev;
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+
+ /* requested length too big for entire address space */
+ if (len > mmap_end - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ if (!addr)
+ return 0;
+
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma_prev(mm, addr, &prev);
+ if (mmap_end - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
+ return addr;
+
+ /* Fall back to a VA space search */
+ return 0;
+}
+
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
@@ -637,25 +678,13 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info = {};
const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

- if (len > mmap_end - mmap_min_addr)
- return -ENOMEM;
-
- if (flags & MAP_FIXED)
+ addr = generic_mmap_hint(filp, addr, len, pgoff, flags);
+ if (addr)
return addr;

- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma_prev(mm, addr, &prev);
- if (mmap_end - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
- return addr;
- }
-
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_end;
@@ -685,27 +714,14 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
- struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info = {};
const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

- /* requested length too big for entire address space */
- if (len > mmap_end - mmap_min_addr)
- return -ENOMEM;
-
- if (flags & MAP_FIXED)
- return addr;
-
/* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma_prev(mm, addr, &prev);
- if (mmap_end - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
- return addr;
- }
+ addr = generic_mmap_hint(filp, addr, len, pgoff, flags);
+ if (addr)
+ return addr;

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
Consolidate the hint searches from both directions (topdown and bottomup)
into generic_mmap_hint(). No functional change is introduced.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
Changes in v2:
  - MAP_FIXED case is also handled in arch_mmap_hint() since this is just
    a special case of the hint addr being "enforced", per Yang Shi.
  - Consolidate error handling in arch_mmap_hint().

 include/linux/sched/mm.h |  4 +++
 mm/mmap.c                | 76 ++++++++++++++++++++++++----------------
 2 files changed, 50 insertions(+), 30 deletions(-)
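
For reference, the return-value protocol documented on generic_mmap_hint()
means a caller can treat any nonzero result as final (an error value, the
MAP_FIXED address, or a hint address with enough room) and only fall back to
a full VA-space search when it returns 0. Below is a minimal sketch of such a
caller, mirroring what generic_get_unmapped_area() does after this patch; the
arch_example_get_unmapped_area() name is illustrative only and not part of
the patch.

	/*
	 * Illustrative caller (not part of this patch): shows the
	 * generic_mmap_hint() calling convention. A nonzero return is
	 * final (error, MAP_FIXED, or a usable hint address); zero means
	 * fall back to a bottom-up search of the VA space.
	 */
	static unsigned long arch_example_get_unmapped_area(struct file *filp,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
	{
		struct vm_unmapped_area_info info = {};

		addr = generic_mmap_hint(filp, addr, len, pgoff, flags);
		if (addr)
			return addr;

		/* No usable hole at the hint: search the whole range. */
		info.length = len;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		return vm_unmapped_area(&info);
	}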