@@ -1366,8 +1366,14 @@ static int collapse_pte_mapped_anon_thp(struct mm_struct *mm,
* Case 1:
* No subpages are PageAnonExclusive (PTEs must be R/O), we can
* collapse into a R/O PMD without further action.
+ *
+ * Case 2:
+ * All subpages are PageAnonExclusive (PTEs may be either R/O or R/W),
+ * we clear PageAnonExclusive on all tail pages but the head page and
+ * collapse to a R/W PMD with VM_WRITE or a R/O PMD without VM_WRITE.
*/
- if (!(exclusive == 0 && !writable))
+ if (!((exclusive == 0 && !writable) ||
+ (exclusive == HPAGE_PMD_NR)))
goto drop_hpage;
/* Collapse pmd entry */
@@ -1396,12 +1402,21 @@ static int collapse_pte_mapped_anon_thp(struct mm_struct *mm,
page = vm_normal_page(vma, addr, pteval);
page_remove_rmap(page, vma, false);
+
+ if (exclusive == HPAGE_PMD_NR)
+ ClearPageAnonExclusive(page);
}
pte_unmap_unlock(start_pte, ptl);
/* Install pmd entry */
pgtable = pmd_pgtable(pmdval);
pmdval = mk_huge_pmd(hpage, vma->vm_page_prot);
+
+ if (exclusive == HPAGE_PMD_NR) {
+ SetPageAnonExclusive(hpage);
+ pmdval = maybe_pmd_mkwrite(pmd_mkdirty(pmdval), vma);
+ }
+
spin_lock(pml);
page_add_anon_rmap(hpage, vma, haddr, RMAP_COMPOUND);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
@@ -1596,7 +1611,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
out_unmap:
pte_unmap_unlock(pte, ptl);
- if (is_hpage && (exclusive == 0 && !writable)) {
+ if (is_hpage &&
+ ((exclusive == 0 && !writable) ||
+ (exclusive == HPAGE_PMD_NR))) {
int res;
res = collapse_pte_mapped_anon_thp(mm, vma, address,
As David points out, this adds another case that is suitable for collapsing anonymous pte-mapped THPs into PMD mappings. When all subpages are PageAnonExclusive (the PTEs may be either R/O or R/W), we can clear PageAnonExclusive on all tail pages but the first (head) page and collapse to an R/W PMD with VM_WRITE, or an R/O PMD without VM_WRITE. Signed-off-by: Xu Yu <xuyu@linux.alibaba.com> --- mm/khugepaged.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-)