Message ID | 20200110190313.17144-2-joao.m.martins@oracle.com (mailing list archive) |
---|---
State | New, archived |
Series | [RFC,01/10] mm: Add pmd support for _PAGE_SPECIAL |
On Fri, Jan 10, 2020 at 07:03:04PM +0000, Joao Martins wrote:
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -293,6 +293,15 @@ static inline int pgd_devmap(pgd_t pgd)
> {
>         return 0;
> }
> +#endif
> +
> +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
> +static inline int pmd_special(pmd_t pmd)
> +{
> +        return !!(pmd_flags(pmd) & _PAGE_SPECIAL);
> +}
> +#endif

The ifdef/endif don't make much sense here; x86 does have PTE_SPECIAL,
and this is an x86 header file, so that can be assumed.

> +++ b/mm/gup.c
> @@ -2079,6 +2079,9 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
>                 return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
>         }
>
> +        if (pmd_special(orig))
> +                return 0;

Here, you're calling it unconditionally.  I think you need a pmd_special()
conditionally defined in include/asm-generic/pgtable.h

+#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
+static inline bool pmd_special(pmd_t pmd)
+{
+        return false;
+}
+#endif

(oh, and please use bool instead of int; I know that's different from
pte_special(), but pte_special() predates bool and nobody's done the work
to convert it yet)

> +++ b/mm/huge_memory.c
> @@ -791,6 +791,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
>         entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
>         if (pfn_t_devmap(pfn))
>                 entry = pmd_mkdevmap(entry);
> +        else if (pfn_t_special(pfn))
> +                entry = pmd_mkspecial(entry);

Again, we'll need a generic one.

> @@ -823,8 +825,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
>          * but we need to be consistent with PTEs and architectures that
>          * can't support a 'special' bit.
>          */
> -        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
> -                        !pfn_t_devmap(pfn));
> +        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));

Should that rather be ...

+        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+                        !pfn_t_devmap(pfn) && !pfn_t_special(pfn));

I also think this comment needs adjusting:

        /*
         * There is no pmd_special() but there may be special pmds, e.g.
         * in a direct-access (dax) mapping, so let's just replicate the
         * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
         */
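Taken together, the shape being suggested would look roughly like the
sketch below (a reading of the review, not the final patch): the x86
definition loses its ifdef and switches to bool, while a fallback in
include/asm-generic/pgtable.h lets generic code such as gup_huge_pmd()
call pmd_special() unconditionally on every architecture.

```c
/* arch/x86/include/asm/pgtable.h -- x86 always has _PAGE_SPECIAL,
 * so no CONFIG_ARCH_HAS_PTE_SPECIAL guard is needed here. */
static inline bool pmd_special(pmd_t pmd)
{
	return !!(pmd_flags(pmd) & _PAGE_SPECIAL);
}

/* include/asm-generic/pgtable.h -- stub for architectures without a
 * special bit, so callers need no ifdefs of their own. */
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline bool pmd_special(pmd_t pmd)
{
	return false;
}
#endif
```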
On 2/3/20 9:34 PM, Matthew Wilcox wrote:
> On Fri, Jan 10, 2020 at 07:03:04PM +0000, Joao Martins wrote:
>> +++ b/arch/x86/include/asm/pgtable.h
>> @@ -293,6 +293,15 @@ static inline int pgd_devmap(pgd_t pgd)
>> {
>>         return 0;
>> }
>> +#endif
>> +
>> +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
>> +static inline int pmd_special(pmd_t pmd)
>> +{
>> +        return !!(pmd_flags(pmd) & _PAGE_SPECIAL);
>> +}
>> +#endif
>
> The ifdef/endif don't make much sense here; x86 does have PTE_SPECIAL,
> and this is an x86 header file, so that can be assumed.
>

Gotcha.

>> +++ b/mm/gup.c
>> @@ -2079,6 +2079,9 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
>>                 return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
>>         }
>>
>> +        if (pmd_special(orig))
>> +                return 0;
>
> Here, you're calling it unconditionally.  I think you need a pmd_special()
> conditionally defined in include/asm-generic/pgtable.h
>
> +#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
> +static inline bool pmd_special(pmd_t pmd)
> +{
> +        return false;
> +}
> +#endif
>
> (oh, and please use bool instead of int; I know that's different from
> pte_special(), but pte_special() predates bool and nobody's done the work
> to convert it yet)
>

Got it.

>> +++ b/mm/huge_memory.c
>> @@ -791,6 +791,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
>>         entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
>>         if (pfn_t_devmap(pfn))
>>                 entry = pmd_mkdevmap(entry);
>> +        else if (pfn_t_special(pfn))
>> +                entry = pmd_mkspecial(entry);
>
> Again, we'll need a generic one.
>

Will add it.

>> @@ -823,8 +825,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
>>          * but we need to be consistent with PTEs and architectures that
>>          * can't support a 'special' bit.
>>          */
>> -        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
>> -                        !pfn_t_devmap(pfn));
>> +        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
>
> Should that rather be ...
>
> +        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
> +                        !pfn_t_devmap(pfn) && !pfn_t_special(pfn));
>

Yes, that is indeed a mistake I had already fixed for v2. Patch 3 does the
exact same thing, so I will adjust it there too, along with the other
comments you made here.

> I also think this comment needs adjusting:
>
>         /*
>          * There is no pmd_special() but there may be special pmds, e.g.
>          * in a direct-access (dax) mapping, so let's just replicate the
>          * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
>          */
>

I'll replace it with the equivalent comment vm_normal_page() has:

        /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

        Joao
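Folding the two agreed-upon fixes together, the check in
vmf_insert_pfn_pmd() would presumably end up looking like this in v2 (a
sketch of the changes discussed above, not the actual v2 posting):

```c
	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn) && !pfn_t_special(pfn));
```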
```diff
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index ad97dc155195..60351c0c15fe 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -255,7 +255,7 @@ static inline int pmd_large(pmd_t pte)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_trans_huge(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP|_PAGE_SPECIAL)) == _PAGE_PSE;
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -293,6 +293,15 @@ static inline int pgd_devmap(pgd_t pgd)
 {
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
+static inline int pmd_special(pmd_t pmd)
+{
+	return !!(pmd_flags(pmd) & _PAGE_SPECIAL);
+}
+#endif
+
 #endif
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -414,6 +423,11 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_DEVMAP);
 }
 
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SPECIAL);
+}
+
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
 	return pmd_set_flags(pmd, _PAGE_PSE);
diff --git a/mm/gup.c b/mm/gup.c
index 7646bf993b25..ba5f10535392 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2079,6 +2079,9 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
 	}
 
+	if (pmd_special(orig))
+		return 0;
+
 	refs = 0;
 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	do {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 41a0fbddc96b..06ad4d6f7477 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -791,6 +791,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
 	if (pfn_t_devmap(pfn))
 		entry = pmd_mkdevmap(entry);
+	else if (pfn_t_special(pfn))
+		entry = pmd_mkspecial(entry);
 	if (write) {
 		entry = pmd_mkyoung(pmd_mkdirty(entry));
 		entry = maybe_pmd_mkwrite(entry, vma);
@@ -823,8 +825,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 	 * but we need to be consistent with PTEs and architectures that
 	 * can't support a 'special' bit.
 	 */
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-			!pfn_t_devmap(pfn));
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
 			(VM_PFNMAP|VM_MIXEDMAP));
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -2013,7 +2014,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 	spinlock_t *ptl;
 	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
-			pmd_devmap(*pmd)))
+			pmd_devmap(*pmd) || pmd_special(*pmd)))
 		return ptl;
 	spin_unlock(ptl);
 	return NULL;
diff --git a/mm/memory.c b/mm/memory.c
index 45442d9a4f52..cfc3668bddeb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1165,7 +1165,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
+		    pmd_devmap(*pmd) || pmd_special(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
```
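To see how a driver would consume this, here is a minimal sketch of a
huge_fault handler for a pageless 2M mapping. The driver itself and its
mydrv_region_phys() helper are hypothetical; only the PFN_SPECIAL flag,
phys_to_pfn_t(), and the vmf_insert_pfn_pmd() signature used by this
series are taken as given.

```c
/*
 * Hypothetical huge_fault handler for a DAX-style driver whose memory
 * has no struct pages.  mydrv_region_phys() is an invented helper that
 * returns the physical address backing the faulting 2M extent.
 */
static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf,
				   enum page_entry_size pe_size)
{
	phys_addr_t phys;
	pfn_t pfn;

	if (pe_size != PE_SIZE_PMD)
		return VM_FAULT_FALLBACK;

	phys = mydrv_region_phys(vmf->vma, vmf->address & PMD_MASK);

	/*
	 * PFN_SPECIAL instead of PFN_DEV|PFN_MAP: there is no struct
	 * page, so insert_pfn_pmd() takes the pmd_mkspecial() branch
	 * and GUP will refuse to take references on this mapping.
	 */
	pfn = phys_to_pfn_t(phys, PFN_SPECIAL);

	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
```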
Currently vmf_insert_pfn_pmd() only works with devmap pfns, and BUG()s
otherwise. Add support for handling special pages, i.e. when the pfn_t is
marked with PFN_SPECIAL.

Special mappings aren't expected to be subject to GUP, hence return no
pages from gup_huge_pmd(), much like it is done for PTEs in
gup_pte_range().

This allows a DAX driver to handle 2M hugepages without struct pages.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 arch/x86/include/asm/pgtable.h | 16 +++++++++++++++-
 mm/gup.c                       |  3 +++
 mm/huge_memory.c               |  7 ++++---
 mm/memory.c                    |  3 ++-
 4 files changed, 24 insertions(+), 5 deletions(-)
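For comparison, the PTE-side precedent the commit message points at is
roughly the following (paraphrased from gup_pte_range() in mm/gup.c of
this era, not quoted verbatim):

```c
	/*
	 * gup_pte_range() already bails out of the GUP fast path on
	 * special PTEs without returning any pages; the gup_huge_pmd()
	 * hunk above mirrors that policy for 2M entries.
	 */
	if (pte_special(pte))
		goto pte_unmap;
```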