--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -42,12 +42,16 @@ SYSCALL_DEFINE0(getpagesize)
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_unmapped_area_info info = {};
+ bool file_hugepage = false;
+
+ if (filp && is_file_hugepages(filp))
+ file_hugepage = true;

if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
+ if (!file_hugepage && (flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
@@ -62,9 +66,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi

info.length = len;
info.low_limit = addr;
info.high_limit = TASK_SIZE;
- info.align_mask = (flags & MAP_SHARED) ?
- (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
+ if (!file_hugepage) {
+ info.align_mask = (flags & MAP_SHARED) ?
+ (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ } else {
+ info.align_mask = huge_page_mask_align(filp);
+ }
return vm_unmapped_area(&info);
}
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -87,6 +87,16 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr,
return base + off;
}

+static unsigned long get_align_mask(struct file *filp, unsigned long flags)
+{
+ if (filp && is_file_hugepages(filp))
+ return huge_page_mask_align(filp);
+ if (filp || (flags & MAP_SHARED))
+ return PAGE_MASK & (SHMLBA - 1);
+
+ return 0;
+}
+
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
@@ -94,12 +104,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
unsigned long task_size = TASK_SIZE;
int do_color_align;
struct vm_unmapped_area_info info = {};
+ bool file_hugepage = false;
+
+ if (filp && is_file_hugepages(filp))
+ file_hugepage = true;

if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
+ if (!file_hugepage && (flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
@@ -111,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
return -ENOMEM;

do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
+ if ((filp || (flags & MAP_SHARED)) && !file_hugepage)
do_color_align = 1;

if (addr) {
@@ -129,8 +143,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
+ info.align_mask = get_align_mask(filp, flags);
+ if (!file_hugepage)
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);

if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
@@ -154,15 +169,19 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long addr = addr0;
int do_color_align;
struct vm_unmapped_area_info info = {};
+ bool file_hugepage = false;

/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));

+ if (filp && is_file_hugepages(filp))
+ file_hugepage = true;
+
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
+ if (!file_hugepage && (flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
@@ -172,7 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return -ENOMEM;

do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
+ if ((filp || (flags & MAP_SHARED)) && !file_hugepage)
do_color_align = 1;

/* requesting a specific address */
@@ -192,8 +211,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
+ info.align_mask = get_align_mask(filp, flags);
+ if (!file_hugepage)
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);

/*
We want to stop special casing hugetlb mappings and make them go through
generic channels, so teach arch_get_unmapped_area{_topdown} to handle
those.

The sparc-specific hugetlb function does not set info.align_offset, and
does not care about adjusting the align_mask for MAP_SHARED cases, so do
the same here for compatibility.

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 arch/sparc/kernel/sys_sparc_32.c | 16 ++++++++++++----
 arch/sparc/kernel/sys_sparc_64.c | 36 ++++++++++++++++++++++++++++--------
 2 files changed, 40 insertions(+), 12 deletions(-)
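
For reviewers who want to poke at the new helper's decision table, here is a
minimal userspace sketch of the selection logic get_align_mask() implements.
Everything in it (the PAGE_SHIFT/SHMLBA/HPAGE_SIZE values, the struct file
stand-in, and both predicate stubs) is a made-up placeholder for
illustration, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in constants; the real values come from asm/page.h and
 * asm/shmparam.h on sparc. */
#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SHMLBA		(4UL * PAGE_SIZE)
#define MAP_SHARED	0x01UL
#define HPAGE_SIZE	(4UL << 20)	/* pretend huge pages are 4M */

/* Hypothetical stand-ins for struct file, is_file_hugepages() and
 * huge_page_mask_align(). */
struct file { bool is_hugepages; };

static bool is_file_hugepages(struct file *filp)
{
	return filp->is_hugepages;
}

static unsigned long huge_page_mask_align(struct file *filp)
{
	(void)filp;
	return PAGE_MASK & (HPAGE_SIZE - 1);
}

/* Mirrors the helper this patch adds to sys_sparc_64.c. */
static unsigned long get_align_mask(struct file *filp, unsigned long flags)
{
	/* hugetlb mappings align to the huge page size; SHMLBA cache
	 * coloring does not apply to them. */
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	/* file-backed or shared mappings must honor SHMLBA so that
	 * aliases of the same page land in the same cache color. */
	if (filp || (flags & MAP_SHARED))
		return PAGE_MASK & (SHMLBA - 1);

	return 0;	/* private anonymous: no extra alignment */
}

int main(void)
{
	struct file huge = { .is_hugepages = true };
	struct file plain = { .is_hugepages = false };

	printf("hugetlb file: %#lx\n", get_align_mask(&huge, 0));
	printf("shared anon:  %#lx\n", get_align_mask(NULL, MAP_SHARED));
	printf("plain file:   %#lx\n", get_align_mask(&plain, 0));
	printf("private anon: %#lx\n", get_align_mask(NULL, 0));
	return 0;
}

Note that the hugetlb branch only widens align_mask; align_offset is left
untouched, which is why the callers above skip the
"info.align_offset = pgoff << PAGE_SHIFT" assignment for hugetlb files,
matching the behavior of the sparc hugetlb getter being replaced.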