| Message ID | 20190416134522.17540-17-ldufour@linux.ibm.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Speculative page faults |
On Tue, Apr 16, 2019 at 03:45:07PM +0200, Laurent Dufour wrote:
> When dealing with the speculative fault path we should use the value of
> the VMA's vm_flags cached in the vm_fault structure.
>
> Currently vm_normal_page() uses the pointer to the VMA to fetch the
> vm_flags value. This patch provides a new __vm_normal_page() which
> receives the vm_flags value as a parameter.
>
> Note: the speculative path is only turned on for architectures providing
> support for the special PTE flag, so only the first block of
> vm_normal_page() is used on the speculative path.
>
> Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>

Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
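For readers following the series, the reason vm_flags has to travel as a parameter is that the speculative handler cannot safely re-read vmf->vma->vm_flags, which may change while the fault is handled without mmap_sem held; it works from the copy cached in struct vm_fault. The fragment below is a minimal illustrative sketch of that calling pattern, not code from this patch: the handler name is made up, and it assumes vmf->vma_flags was snapshotted earlier in the series when the speculative walk captured the VMA fields.

/*
 * Illustration only (not from this patch): how a speculative fault
 * handler is expected to consume the cached flags.  Every check uses
 * the snapshot in vmf instead of dereferencing vmf->vma again.
 */
static vm_fault_t example_speculative_fault(struct vm_fault *vmf)
{
	struct page *page;
	/* assumed to have been copied from the VMA when the walk started */
	unsigned long flags = vmf->vma_flags;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !(flags & VM_WRITE))
		return VM_FAULT_SIGSEGV;

	/* the cached flags are passed instead of vmf->vma->vm_flags */
	page = __vm_normal_page(vmf->vma, vmf->address, vmf->orig_pte,
				false, flags);
	if (!page)
		return VM_FAULT_RETRY;	/* fall back to the regular path */

	return 0;
}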
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f465bb2b049e..f14b2c9ddfd4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1421,9 +1421,21 @@ static inline void INIT_VMA(struct vm_area_struct *vma)
 #endif
 }
 
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-			     pte_t pte, bool with_public_device);
-#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, bool with_public_device,
+			      unsigned long vma_flags);
+static inline struct page *_vm_normal_page(struct vm_area_struct *vma,
+					   unsigned long addr, pte_t pte,
+					   bool with_public_device)
+{
+	return __vm_normal_page(vma, addr, pte, with_public_device,
+				vma->vm_flags);
+}
+static inline struct page *vm_normal_page(struct vm_area_struct *vma,
+					  unsigned long addr, pte_t pte)
+{
+	return _vm_normal_page(vma, addr, pte, false);
+}
 
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 				pmd_t pmd);
diff --git a/mm/memory.c b/mm/memory.c
index 85ec5ce5c0a8..be93f2c8ebe0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -533,7 +533,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 }
 
 /*
- * vm_normal_page -- This function gets the "struct page" associated with a pte.
+ * __vm_normal_page -- This function gets the "struct page" associated with
+ * a pte.
  *
  * "Special" mappings do not wish to be associated with a "struct page" (either
  * it doesn't exist, or it exists but they don't want to touch it). In this
@@ -574,8 +575,9 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-			     pte_t pte, bool with_public_device)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, bool with_public_device,
+			      unsigned long vma_flags)
 {
 	unsigned long pfn = pte_pfn(pte);
 
@@ -584,7 +586,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		goto check_pfn;
 	if (vma->vm_ops && vma->vm_ops->find_special_page)
 		return vma->vm_ops->find_special_page(vma, addr);
-	if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+	if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
 		return NULL;
 	if (is_zero_pfn(pfn))
 		return NULL;
@@ -620,8 +622,8 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
 
-	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-		if (vma->vm_flags & VM_MIXEDMAP) {
+	if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+		if (vma_flags & VM_MIXEDMAP) {
 			if (!pfn_valid(pfn))
 				return NULL;
 			goto out;
@@ -630,7 +632,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 			if (pfn == vma->vm_pgoff + off)
 				return NULL;
-			if (!is_cow_mapping(vma->vm_flags))
+			if (!is_cow_mapping(vma_flags))
 				return NULL;
 		}
 	}
@@ -2532,7 +2534,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
-	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+	vmf->page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false,
+				     vmf->vma_flags);
 	if (!vmf->page) {
 		/*
 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
@@ -3706,7 +3709,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
 	update_mmu_cache(vma, vmf->address, vmf->pte);
 
-	page = vm_normal_page(vma, vmf->address, pte);
+	page = __vm_normal_page(vma, vmf->address, pte, false, vmf->vma_flags);
 	if (!page) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return 0;
-- 
2.21.0
When dealing with the speculative fault path we should use the value of the
VMA's vm_flags cached in the vm_fault structure.

Currently vm_normal_page() uses the pointer to the VMA to fetch the vm_flags
value. This patch provides a new __vm_normal_page() which receives the
vm_flags value as a parameter.

Note: the speculative path is only turned on for architectures providing
support for the special PTE flag, so only the first block of vm_normal_page()
is used on the speculative path.

Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
---
 include/linux/mm.h | 18 +++++++++++++++---
 mm/memory.c        | 21 ++++++++++++---------
 2 files changed, 27 insertions(+), 12 deletions(-)
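To make the note above concrete: when CONFIG_ARCH_HAS_PTE_SPECIAL is set, the first block of the function can make all of its decisions from pte_special(), the vm_ops hook and the flags value passed in, without touching fields such as vma->vm_start or vma->vm_pgoff that the !CONFIG_ARCH_HAS_PTE_SPECIAL block needs. The outline below is an abridged, illustrative rendering of that shape, not a verbatim copy of mm/memory.c; see the diff above for the real hunks.

struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte, bool with_public_device,
			      unsigned long vma_flags)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		/*
		 * First block: only pte_special(), the vm_ops hook and the
		 * caller-supplied vma_flags are consulted, so the flags
		 * cached in vm_fault are enough on the speculative path.
		 */
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		/* zero-page, device-public and bad-PTE handling elided */
		return NULL;
	}

	/*
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL (elided): this block must look at
	 * vma->vm_start and vma->vm_pgoff to recognise remap_pfn_range()
	 * style COW mappings, which is why the speculative path is only
	 * enabled on architectures providing the special PTE bit.
	 */

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;
	return pfn_to_page(pfn);
}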