@@ -2086,9 +2086,6 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (is_huge_zero_pmd(*pmd))
goto unlock;
- if (pmd_protnone(*pmd))
- goto unlock;
-
folio = page_folio(pmd_page(*pmd));
toptier = node_is_toptier(folio_nid(folio));
/*
@@ -2102,6 +2099,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
!toptier)
folio_xchg_fault_count(folio, atomic_read(&mm->hint_faults));
+
+ if (pmd_protnone(*pmd))
+ goto unlock;
+
}
/*
* In case prot_numa, we are under mmap_read_lock(mm). It's critical
@@ -118,10 +118,6 @@ static long change_pte_range(struct mmu_gather *tlb,
int nid;
bool toptier;
- /* Avoid TLB flush if possible */
- if (pte_protnone(oldpte))
- continue;
-
folio = vm_normal_folio(vma, addr, oldpte);
if (!folio || folio_is_zone_device(folio) ||
folio_test_ksm(folio))
@@ -162,6 +158,10 @@ static long change_pte_range(struct mmu_gather *tlb,
folio_xchg_fault_count(folio,
atomic_read(&vma->vm_mm->hint_faults));
+ /* Avoid TLB flush if possible */
+ if (pte_protnone(oldpte))
+ continue;
+
}
oldpte = ptep_modify_prot_start(vma, addr, pte);
During scanning, PTE updates are skipped for pages that are already
marked PROT_NONE. That skipping is still required, but the scan-time
fault count should nevertheless be updated so that the fault count used
to calculate the latency stays up to date with the most recent scanning
iteration.

Signed-off-by: Bharata B Rao <bharata@amd.com>
---
 mm/huge_memory.c | 7 ++++---
 mm/mprotect.c    | 8 ++++----
 2 files changed, 8 insertions(+), 7 deletions(-)
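
Purely as an illustration (not part of the patch), below is a minimal
user-space C model of the ordering this change establishes in
change_pte_range(): the fault-count snapshot is refreshed for every
scanned lower-tier folio, and only afterwards is the already-PROT_NONE
case skipped. The names folio_model and model_scan_one are made-up
stand-ins, not kernel APIs.

/* Stand-alone model, not kernel code: illustrates the new ordering only. */
#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for the per-folio scan-time fault count snapshot. */
struct folio_model {
	int fault_count;
};

/* Mirrors the relevant portion of change_pte_range() after this patch. */
static void model_scan_one(struct folio_model *folio, bool already_protnone,
			   bool lower_tier, int hint_faults_now)
{
	/*
	 * Refresh the snapshot first (as folio_xchg_fault_count() does for
	 * non-toptier folios), so it reflects the current scan iteration.
	 */
	if (lower_tier)
		folio->fault_count = hint_faults_now;

	/*
	 * Only then skip the PROT_NONE case (the moved pte_protnone()
	 * check), avoiding the TLB flush while keeping the count fresh.
	 */
	if (already_protnone) {
		printf("already PROT_NONE: count now %d, PTE left alone\n",
		       folio->fault_count);
		return;
	}

	printf("marking PROT_NONE: count %d\n", folio->fault_count);
}

int main(void)
{
	struct folio_model f = { .fault_count = 0 };

	model_scan_one(&f, false, true, 10); /* first scan marks the page */
	model_scan_one(&f, true, true, 25);  /* rescan still refreshes count */
	return 0;
}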