Modularize hugepage collapse by introducing struct collapse_control.
This structure describes the properties of the requested collapse and
also serves as a local scratch pad to use during the collapse itself.

Start by moving global per-node khugepaged statistics into this new
structure, and stack-allocate one for khugepaged collapse context.

Signed-off-by: Zach O'Keefe <zokeefe@google.com>
---
 mm/khugepaged.c | 87 ++++++++++++++++++++++++++++---------------------
 1 file changed, 49 insertions(+), 38 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -86,6 +86,14 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
#define MAX_PTE_MAPPED_THP 8
+struct collapse_control {
+ /* Num pages scanned per node */
+ int node_load[MAX_NUMNODES];
+
+ /* Last target selected in khugepaged_find_target_node() */
+ int last_target_node;
+};
+
/**
* struct mm_slot - hash lookup from mm to mm_slot
* @hash: hash collision list
@@ -786,9 +794,7 @@ static void khugepaged_alloc_sleep(void)
remove_wait_queue(&khugepaged_wait, &wait);
}
-static int khugepaged_node_load[MAX_NUMNODES];
-
-static bool khugepaged_scan_abort(int nid)
+static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
{
int i;
@@ -800,11 +806,11 @@ static bool khugepaged_scan_abort(int nid)
return false;
/* If there is a count for this node already, it must be acceptable */
- if (khugepaged_node_load[nid])
+ if (cc->node_load[nid])
return false;
for (i = 0; i < MAX_NUMNODES; i++) {
- if (!khugepaged_node_load[i])
+ if (!cc->node_load[i])
continue;
if (node_distance(nid, i) > node_reclaim_distance)
return true;
@@ -819,28 +825,27 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
}
#ifdef CONFIG_NUMA
-static int khugepaged_find_target_node(void)
+static int khugepaged_find_target_node(struct collapse_control *cc)
{
- static int last_khugepaged_target_node = NUMA_NO_NODE;
int nid, target_node = 0, max_value = 0;
/* find first node with max normal pages hit */
for (nid = 0; nid < MAX_NUMNODES; nid++)
- if (khugepaged_node_load[nid] > max_value) {
- max_value = khugepaged_node_load[nid];
+ if (cc->node_load[nid] > max_value) {
+ max_value = cc->node_load[nid];
target_node = nid;
}
/* do some balance if several nodes have the same hit record */
- if (target_node <= last_khugepaged_target_node)
- for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
- nid++)
- if (max_value == khugepaged_node_load[nid]) {
+ if (target_node <= cc->last_target_node)
+ for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
+ nid++)
+ if (max_value == cc->node_load[nid]) {
target_node = nid;
break;
}
- last_khugepaged_target_node = target_node;
+ cc->last_target_node = target_node;
return target_node;
}
@@ -878,7 +883,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
return *hpage;
}
#else
-static int khugepaged_find_target_node(void)
+static int khugepaged_find_target_node(struct collapse_control *cc)
{
return 0;
}
@@ -1235,10 +1240,9 @@ static void collapse_huge_page(struct mm_struct *mm,
return;
}
-static int khugepaged_scan_pmd(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address,
- struct page **hpage)
+static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, struct page **hpage,
+ struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
@@ -1256,7 +1260,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
if (result != SCAN_SUCCEED)
goto out;
- memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ memset(cc->node_load, 0, sizeof(cc->node_load));
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
@@ -1322,16 +1326,16 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
/*
* Record which node the original page is from and save this
- * information to khugepaged_node_load[].
+ * information to cc->node_load[].
* Khugepaged will allocate hugepage from the node has the max
* hit record.
*/
node = page_to_nid(page);
- if (khugepaged_scan_abort(node)) {
+ if (khugepaged_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
goto out_unmap;
}
- khugepaged_node_load[node]++;
+ cc->node_load[node]++;
if (!PageLRU(page)) {
result = SCAN_PAGE_LRU;
goto out_unmap;
@@ -1382,7 +1386,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret) {
- node = khugepaged_find_target_node();
+ node = khugepaged_find_target_node(cc);
/* collapse_huge_page will return with the mmap_lock released */
collapse_huge_page(mm, address, hpage, node,
referenced, unmapped);
@@ -2033,8 +2037,9 @@ static void collapse_file(struct mm_struct *mm,
/* TODO: tracepoints */
}
-static void khugepaged_scan_file(struct mm_struct *mm,
- struct file *file, pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
+ pgoff_t start, struct page **hpage,
+ struct collapse_control *cc)
{
struct page *page = NULL;
struct address_space *mapping = file->f_mapping;
@@ -2045,7 +2050,7 @@ static void khugepaged_scan_file(struct mm_struct *mm,
present = 0;
swap = 0;
- memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ memset(cc->node_load, 0, sizeof(cc->node_load));
rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
if (xas_retry(&xas, page))
@@ -2070,11 +2075,11 @@ static void khugepaged_scan_file(struct mm_struct *mm,
}
node = page_to_nid(page);
- if (khugepaged_scan_abort(node)) {
+ if (khugepaged_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
break;
}
- khugepaged_node_load[node]++;
+ cc->node_load[node]++;
if (!PageLRU(page)) {
result = SCAN_PAGE_LRU;
@@ -2107,7 +2112,7 @@ static void khugepaged_scan_file(struct mm_struct *mm,
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
} else {
- node = khugepaged_find_target_node();
+ node = khugepaged_find_target_node(cc);
collapse_file(mm, file, start, hpage, node);
}
}
@@ -2115,8 +2120,9 @@ static void khugepaged_scan_file(struct mm_struct *mm,
/* TODO: tracepoints */
}
#else
-static void khugepaged_scan_file(struct mm_struct *mm,
- struct file *file, pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
+ pgoff_t start, struct page **hpage,
+ struct collapse_control *cc)
{
BUILD_BUG();
}
@@ -2127,7 +2133,8 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
- struct page **hpage)
+ struct page **hpage,
+ struct collapse_control *cc)
__releases(&khugepaged_mm_lock)
__acquires(&khugepaged_mm_lock)
{
@@ -2203,12 +2210,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
mmap_read_unlock(mm);
ret = 1;
- khugepaged_scan_file(mm, file, pgoff, hpage);
+ khugepaged_scan_file(mm, file, pgoff, hpage,
+ cc);
fput(file);
} else {
ret = khugepaged_scan_pmd(mm, vma,
khugepaged_scan.address,
- hpage);
+ hpage, cc);
}
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
@@ -2264,7 +2272,7 @@ static int khugepaged_wait_event(void)
kthread_should_stop();
}
-static void khugepaged_do_scan(void)
+static void khugepaged_do_scan(struct collapse_control *cc)
{
struct page *hpage = NULL;
unsigned int progress = 0, pass_through_head = 0;
@@ -2288,7 +2296,7 @@ static void khugepaged_do_scan(void)
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
- &hpage);
+ &hpage, cc);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
@@ -2327,12 +2335,15 @@ static void khugepaged_wait_work(void)
static int khugepaged(void *none)
{
struct mm_slot *mm_slot;
+ struct collapse_control cc = {
+ .last_target_node = NUMA_NO_NODE,
+ };
set_freezable();
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
- khugepaged_do_scan();
+ khugepaged_do_scan(&cc);
khugepaged_wait_work();
}
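Not part of the patch, but for readers skimming the diff: below is a
minimal, stand-alone user-space sketch of the pattern being introduced,
i.e. per-scan scratch state moving out of file-scope globals into a
caller-owned struct collapse_control threaded through the scan helpers.
MAX_NUMNODES, NUMA_NO_NODE, the distance table, and the helper names
scan_abort()/find_target_node() are toy stand-ins for the kernel's
macros and functions, and the abort heuristic is simplified.

/*
 * Stand-alone user-space sketch (not kernel code).  Scratch state lives
 * in a caller-owned context struct instead of globals, so concurrent
 * scans could each carry their own.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES            4
#define NUMA_NO_NODE            (-1)
#define NODE_RECLAIM_DISTANCE   30

/* Toy symmetric node distances: 10 on-node, 20 adjacent, 40 remote. */
static const int node_distance[MAX_NUMNODES][MAX_NUMNODES] = {
        { 10, 20, 40, 40 },
        { 20, 10, 20, 40 },
        { 40, 20, 10, 20 },
        { 40, 40, 20, 10 },
};

struct collapse_control {
        /* Pages scanned per node during the current scan */
        int node_load[MAX_NUMNODES];

        /* Last node chosen; used to rotate among tied nodes */
        int last_target_node;
};

/* Loosely mirrors khugepaged_scan_abort(): stop scanning once a page's
 * node is too far from every node already counted in node_load[]. */
static bool scan_abort(int nid, struct collapse_control *cc)
{
        int i;

        if (cc->node_load[nid])
                return false;

        for (i = 0; i < MAX_NUMNODES; i++) {
                if (!cc->node_load[i])
                        continue;
                if (node_distance[nid][i] > NODE_RECLAIM_DISTANCE)
                        return true;
        }
        return false;
}

/* Mirrors khugepaged_find_target_node(): pick the most-hit node,
 * breaking ties by advancing past the previous choice. */
static int find_target_node(struct collapse_control *cc)
{
        int nid, target_node = 0, max_value = 0;

        for (nid = 0; nid < MAX_NUMNODES; nid++)
                if (cc->node_load[nid] > max_value) {
                        max_value = cc->node_load[nid];
                        target_node = nid;
                }

        if (target_node <= cc->last_target_node)
                for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES; nid++)
                        if (max_value == cc->node_load[nid]) {
                                target_node = nid;
                                break;
                        }

        cc->last_target_node = target_node;
        return target_node;
}

int main(void)
{
        /* Stack-allocated context, as khugepaged() now does. */
        struct collapse_control cc = {
                .last_target_node = NUMA_NO_NODE,
        };

        /* Each scan starts by resetting the per-node counts. */
        memset(cc.node_load, 0, sizeof(cc.node_load));
        cc.node_load[0] = 2;
        cc.node_load[1] = 2;

        /* Node 2 is too far from node 0 (distance 40 > 30): abort. */
        printf("abort on node 2: %d\n", scan_abort(2, &cc));

        /* Two identical scans pick node 0, then node 1: ties rotate
         * via cc->last_target_node. */
        printf("target: %d\n", find_target_node(&cc));
        printf("target: %d\n", find_target_node(&cc));
        return 0;
}

As written, the sketch reports an abort for node 2 and then selects
node 0 followed by node 1, showing how last_target_node rotates the
choice among equally loaded nodes across successive scans.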