--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -330,6 +330,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define HAVE_ARCH_MMAP_HINT
#endif /* !__ASSEMBLY__ */
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -17,6 +17,31 @@
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+unsigned long arch_mmap_hint(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ int aliasing = cache_is_vipt_aliasing();
+ int do_align = 0;
+
+ if (!addr)
+ return 0;
+
+ /*
+ * We only need to do colour alignment if either the I or D
+ * caches alias.
+ */
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
+
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ return generic_mmap_hint(filp, addr, len, pgoff, flags);
+}
+
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
@@ -32,7 +57,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info = {};
@@ -57,17 +81,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (len > TASK_SIZE)
return -ENOMEM;
- if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
+ addr = arch_mmap_hint(filp, addr, len, pgoff, flags);
+ if (addr)
+ return addr;
info.length = len;
info.low_limit = mm->mmap_base;
@@ -82,7 +98,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags, vm_flags_t vm_flags)
{
- struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_align = 0;
@@ -108,16 +123,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
}
/* requesting a specific address */
- if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
+ addr = arch_mmap_hint(filp, addr, len, pgoff, flags);
+ if (addr)
+ return addr;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
Introduce arm arch_mmap_hint() and define HAVE_ARCH_MMAP_HINT. This is a
preparatory patch; no functional change is introduced.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm/include/asm/pgtable.h |  1 +
 arch/arm/mm/mmap.c             | 54 +++++++++++++++++++---------------
 2 files changed, 32 insertions(+), 23 deletions(-)
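For context only (not part of this patch): the hunks above delete the
open-coded hint checks and delegate to generic_mmap_hint(), which is
presumably introduced earlier in this series. A minimal sketch of what that
common helper might look like, assuming it simply re-validates the already
aligned hint against TASK_SIZE and the existing VMAs, mirroring the blocks
removed from arch_get_unmapped_area() and arch_get_unmapped_area_topdown():

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical sketch -- the real helper lives in common code and may
 * differ. filp/pgoff/flags are kept to match the arch hook signature.
 */
unsigned long generic_mmap_hint(struct file *filp, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/* The caller has already page- or colour-aligned the hint. */
	if (!addr || len > TASK_SIZE)
		return 0;

	/*
	 * Accept the hint if the mapping fits below TASK_SIZE and does
	 * not overlap an existing VMA (including its stack guard gap);
	 * otherwise return 0 so the caller falls back to a gap search.
	 */
	vma = find_vma(mm, addr);
	if (TASK_SIZE - len >= addr &&
	    (!vma || addr + len <= vm_start_gap(vma)))
		return addr;

	return 0;
}

If the series works this way, the arm hook above only has to apply the
VIPT colour alignment before delegating, which is what keeps this patch
free of functional changes.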