@@ -5562,15 +5562,22 @@ EXPORT_SYMBOL(__might_fault);
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+
+struct subpage_arg {
+ struct page *dst;
+ struct page *src;
+ struct vm_area_struct *vma;
+};
+
/*
* Process all subpages of the specified huge page with the specified
* operation. The target subpage will be processed last to keep its
* cache lines hot.
*/
-static inline void process_huge_page(
+static inline void process_huge_page(struct subpage_arg *sa,
unsigned long addr_hint, unsigned int pages_per_huge_page,
- void (*process_subpage)(unsigned long addr, int idx, void *arg),
- void *arg)
+ void (*process_subpage)(struct subpage_arg *sa,
+ unsigned long addr, int idx))
{
int i, n, base, l;
unsigned long addr = addr_hint &
@@ -5586,7 +5593,7 @@ static inline void process_huge_page(
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- process_subpage(addr + i * PAGE_SIZE, i, arg);
+ process_subpage(sa, addr + i * PAGE_SIZE, i);
}
} else {
/* If target subpage in second half of huge page */
@@ -5595,7 +5602,7 @@ static inline void process_huge_page(
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- process_subpage(addr + i * PAGE_SIZE, i, arg);
+ process_subpage(sa, addr + i * PAGE_SIZE, i);
}
}
/*
@@ -5607,9 +5614,9 @@ static inline void process_huge_page(
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+ process_subpage(sa, addr + left_idx * PAGE_SIZE, left_idx);
cond_resched();
- process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+ process_subpage(sa, addr + right_idx * PAGE_SIZE, right_idx);
}
}
@@ -5628,9 +5635,9 @@ static void clear_gigantic_page(struct page *page,
}
}
-static void clear_subpage(unsigned long addr, int idx, void *arg)
+static void clear_subpage(struct subpage_arg *sa, unsigned long addr, int idx)
{
- struct page *page = arg;
+ struct page *page = sa->dst;
clear_user_highpage(page + idx, addr);
}
@@ -5640,13 +5647,18 @@ void clear_huge_page(struct page *page,
{
unsigned long addr = addr_hint &
~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+ struct subpage_arg sa = {
+ .dst = page,
+ .src = NULL,
+ .vma = NULL,
+ };
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, pages_per_huge_page);
return;
}
- process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+ process_huge_page(&sa, addr_hint, pages_per_huge_page, clear_subpage);
}
static void copy_user_gigantic_page(struct page *dst, struct page *src,
@@ -5668,16 +5680,9 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
}
}
-struct copy_subpage_arg {
- struct page *dst;
- struct page *src;
- struct vm_area_struct *vma;
-};
-
-static void copy_subpage(unsigned long addr, int idx, void *arg)
+static void copy_subpage(struct subpage_arg *copy_arg,
+ unsigned long addr, int idx)
{
- struct copy_subpage_arg *copy_arg = arg;
-
copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
addr, copy_arg->vma);
}
@@ -5688,7 +5693,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
{
unsigned long addr = addr_hint &
~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
- struct copy_subpage_arg arg = {
+ struct subpage_arg sa = {
.dst = dst,
.src = src,
.vma = vma,
@@ -5700,7 +5705,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
return;
}
- process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+ process_huge_page(&sa, addr_hint, pages_per_huge_page, copy_subpage);
}
long copy_huge_page_from_user(struct page *dst_page,
Mechanical change to process_huge_page() to pass subpage clear/copy args
via struct subpage_arg * instead of passing an opaque pointer around.

No change in generated code.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 mm/memory.c | 47 ++++++++++++++++++++++++++---------------------
 1 file changed, 26 insertions(+), 21 deletions(-)