@@ -414,24 +414,20 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
test_bit(MMF_DISABLE_THP, &mm->flags);
}
-static bool hugepage_pmd_enabled(void)
+static bool thp_enabled(void)
{
/*
* We cover the anon, shmem and the file-backed case here; file-backed
* hugepages, when configured in, are determined by the global control.
- * Anon pmd-sized hugepages are determined by the pmd-size control.
+ * Anon mTHPs are determined by the per-size control.
* Shmem pmd-sized hugepages are also determined by its pmd-size control,
* except when the global shmem_huge is set to SHMEM_HUGE_DENY.
*/
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
hugepage_global_enabled())
return true;
- if (test_bit(PMD_ORDER, &huge_anon_orders_always))
- return true;
- if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
- return true;
- if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
- hugepage_global_enabled())
+ if (huge_anon_orders_always || huge_anon_orders_madvise ||
+ (huge_anon_orders_inherit && hugepage_global_enabled()))
return true;
if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
return true;
@@ -474,9 +470,9 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- hugepage_pmd_enabled()) {
- if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
- PMD_ORDER))
+ thp_enabled()) {
+ if (thp_vma_allowable_orders(vma, vm_flags, TVA_ENFORCE_SYSFS,
+ BIT(PMD_ORDER + 1) - 1))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -2586,8 +2582,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_order(vma, vma->vm_flags,
- TVA_ENFORCE_SYSFS, PMD_ORDER)) {
+ if (!thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_ENFORCE_SYSFS, BIT(PMD_ORDER + 1) - 1)) {
skip:
progress++;
continue;
@@ -2611,6 +2607,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+ if (!thp_vma_allowable_order(vma, vma->vm_flags,
+ TVA_ENFORCE_SYSFS, PMD_ORDER)) {
+ khugepaged_scan.address += HPAGE_PMD_SIZE;
+ continue;
+ }
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
@@ -2689,7 +2690,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
static int khugepaged_has_work(void)
{
- return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
+ return !list_empty(&khugepaged_scan.mm_head) && thp_enabled();
}
static int khugepaged_wait_event(void)
@@ -2762,7 +2763,7 @@ static void khugepaged_wait_work(void)
return;
}
- if (hugepage_pmd_enabled())
+ if (thp_enabled())
wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
@@ -2793,7 +2794,7 @@ static void set_recommended_min_free_kbytes(void)
int nr_zones = 0;
unsigned long recommended_min;
- if (!hugepage_pmd_enabled()) {
+ if (!thp_enabled()) {
calculate_min_free_kbytes();
goto update_wmarks;
}
@@ -2843,7 +2844,7 @@ int start_stop_khugepaged(void)
int err = 0;
mutex_lock(&khugepaged_mutex);
- if (hugepage_pmd_enabled()) {
+ if (thp_enabled()) {
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
@@ -2869,7 +2870,7 @@ int start_stop_khugepaged(void)
void khugepaged_min_free_kbytes_update(void)
{
mutex_lock(&khugepaged_mutex);
- if (hugepage_pmd_enabled() && khugepaged_thread)
+ if (thp_enabled() && khugepaged_thread)
set_recommended_min_free_kbytes();
mutex_unlock(&khugepaged_mutex);
}
Activate khugepaged for anonymous collapse even if only a single mTHP
order is enabled. Note that we still scan the VMAs only when they are
PMD-aligned/sized, for ease of implementation.

Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 mm/khugepaged.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
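
To illustrate the activation change: the heart of the patch is that
khugepaged now keys off *any* enabled anonymous mTHP order rather than
only the PMD order. Below is a minimal userspace sketch of that bitmap
logic, assuming PMD_ORDER = 9 (4K base pages, 2M PMD); the
huge_anon_orders_* variables here merely mimic the kernel's per-size
sysfs bitmaps (bit N set <=> the order-N size is enabled), this is not
kernel code.

/* sketch: old PMD-only activation test vs. new any-order test */
#include <stdbool.h>
#include <stdio.h>

#define PMD_ORDER	9UL
#define BIT(n)		(1UL << (n))

/* mimicked per-size sysfs state: bit N set => order N enabled */
static unsigned long huge_anon_orders_always;
static unsigned long huge_anon_orders_madvise;
static unsigned long huge_anon_orders_inherit;
static bool global_enabled;

/* old behaviour: khugepaged ran only if the PMD order was enabled */
static bool old_hugepage_pmd_enabled(void)
{
	return (huge_anon_orders_always & BIT(PMD_ORDER)) ||
	       (huge_anon_orders_madvise & BIT(PMD_ORDER)) ||
	       ((huge_anon_orders_inherit & BIT(PMD_ORDER)) &&
		global_enabled);
}

/* new behaviour: khugepaged runs if any anon order is enabled */
static bool new_thp_enabled(void)
{
	return huge_anon_orders_always || huge_anon_orders_madvise ||
	       (huge_anon_orders_inherit && global_enabled);
}

int main(void)
{
	/* only the 64K size (order 4) set to "always", PMD size "never" */
	huge_anon_orders_always = BIT(4);

	printf("old: %d new: %d\n", old_hugepage_pmd_enabled(),
	       new_thp_enabled());	/* prints "old: 0 new: 1" */
	return 0;
}

So with, say, only hugepages-64kB/enabled set to "always", khugepaged
previously never started; with this patch it does.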
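
Similarly, khugepaged_enter_vma() and the scan loop now pass an orders
mask instead of the single PMD order. BIT(PMD_ORDER + 1) - 1 is simply a
mask with bits 0..PMD_ORDER set; for anonymous VMAs with
TVA_ENFORCE_SYSFS, thp_vma_allowable_orders() intersects the mask with
the orders actually enabled, so the low orders included in the mask are
harmless. A quick sketch of the arithmetic, again assuming
PMD_ORDER = 9:

#include <stdio.h>

#define PMD_ORDER	9UL
#define BIT(n)		(1UL << (n))

int main(void)
{
	/* bits 0..9 set: every order up to and including the PMD order */
	unsigned long orders = BIT(PMD_ORDER + 1) - 1;

	printf("mask = %#lx\n", orders);	/* prints "mask = 0x3ff" */
	return 0;
}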
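
One more illustration, for the in-loop recheck in the shmem/file path:
with the full mask, the per-VMA gate can now pass when only a non-PMD
order is allowed, but file/shmem collapse is still PMD-only, hence the
explicit PMD_ORDER test before hpage_collapse_scan_file(), which skips
ahead by HPAGE_PMD_SIZE on failure. A sketch of the two checks diverging,
using a hypothetical enabled-orders bitmap with only order 4 set:

#include <stdio.h>

#define PMD_ORDER	9UL
#define BIT(n)		(1UL << (n))

int main(void)
{
	unsigned long enabled = BIT(4);		    /* only 64K enabled */
	unsigned long mask = BIT(PMD_ORDER + 1) - 1; /* orders 0..9 */

	/* VMA-level gate: some order is allowed -> VMA gets scanned */
	printf("any order: %d\n", (enabled & mask) != 0);	/* 1 */
	/* file path: PMD itself not allowed -> skip this PMD range */
	printf("pmd order: %d\n", (enabled & BIT(PMD_ORDER)) != 0); /* 0 */
	return 0;
}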