@@ -1264,7 +1264,12 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
{
spinlock_t *dst_ptl, *src_ptl;
pud_t pud;
- int ret;
+ pmd_t *pmd_pgtable = NULL;
+ int ret = -ENOMEM;
+
+ pmd_pgtable = pmd_alloc_one_page_with_ptes(vma->vm_mm, addr);
+ if (unlikely(!pmd_pgtable))
+ goto out;
dst_ptl = pud_lock(dst_mm, dst_pud);
src_ptl = pud_lockptr(src_mm, src_pud);
@@ -1272,16 +1277,30 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = -EAGAIN;
pud = *src_pud;
- if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
- goto out_unlock;
/*
- * When page table lock is held, the huge zero pud should not be
- * under splitting since we don't split the page itself, only pud to
- * a page table.
+ * Only a transparent huge PUD page needs extra page table pages for
+ * a possible huge page split.
*/
- if (is_huge_zero_pud(pud)) {
- /* No huge zero pud yet */
+ if (!pud_trans_huge(pud))
+ pmd_free_page_with_ptes(dst_mm, pmd_pgtable);
+
+ if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+ goto out_unlock;
+
+ if (pud_trans_huge(pud)) {
+ struct page *src_page;
+ int i;
+
+ src_page = pud_page(pud);
+ VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+ get_page(src_page);
+ page_dup_rmap(src_page, true);
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PUD_NR);
+ mm_inc_nr_pmds(dst_mm);
+ for (i = 0; i < (1 << (HPAGE_PUD_ORDER - HPAGE_PMD_ORDER)); i++)
+ mm_inc_nr_ptes(dst_mm);
+ pgtable_trans_huge_pud_deposit(dst_mm, dst_pud, virt_to_page(pmd_pgtable));
}
pudp_set_wrprotect(src_mm, addr, src_pud);
@@ -1292,6 +1311,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
out_unlock:
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
+out:
return ret;
}
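
A note on the accounting in the second hunk: the loop bound 1 << (HPAGE_PUD_ORDER - HPAGE_PMD_ORDER) is one PTE table per PMD entry covering the copied huge PUD. The sketch below is not part of the patch; it is a standalone userspace illustration of that arithmetic, assuming the usual x86_64 values (4 KiB base pages, HPAGE_PMD_ORDER = 9 for a 2 MiB PMD-mapped page, HPAGE_PUD_ORDER = 18 for a 1 GiB PUD-mapped page), where the bound evaluates to 512.

#include <stdio.h>

/*
 * Assumed values for x86_64 with 4 KiB base pages; they are not taken
 * from this patch and exist only to illustrate the accounting above.
 */
#define PAGE_SHIFT	12
#define HPAGE_PMD_SHIFT	21	/* 2 MiB PMD-mapped huge page */
#define HPAGE_PUD_SHIFT	30	/* 1 GiB PUD-mapped huge page */
#define HPAGE_PMD_ORDER	(HPAGE_PMD_SHIFT - PAGE_SHIFT)	/* 9 */
#define HPAGE_PUD_ORDER	(HPAGE_PUD_SHIFT - PAGE_SHIFT)	/* 18 */

int main(void)
{
	/* One PTE table per PMD entry covering the PUD-sized page. */
	int pte_tables = 1 << (HPAGE_PUD_ORDER - HPAGE_PMD_ORDER);

	/*
	 * Mirrors the counters updated in copy_huge_pud() above: one PMD
	 * table (mm_inc_nr_pmds) plus one PTE table per PMD entry
	 * (mm_inc_nr_ptes in the loop) are accounted up front so that a
	 * possible later split of the 1 GiB mapping already has its page
	 * table pages.
	 */
	printf("PMD tables deposited per huge PUD: 1\n");
	printf("PTE tables deposited per huge PUD: %d\n", pte_tables);

	return 0;
}

The exact counts change with the configured page table layout; only the arithmetic behind the mm_inc_nr_pmds()/mm_inc_nr_ptes() bookkeeping above is illustrated here.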