Message ID | 20231211155652.131054-9-david@redhat.com (mailing list archive)
State | New
Series | mm/rmap: interface overhaul
On 11/12/2023 15:56, David Hildenbrand wrote:
> Let's convert insert_page_into_pte_locked() and do_set_pmd(). While at it,
> perform some folio conversion.
> 
> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  mm/memory.c | 14 ++++++++------
>  1 file changed, 8 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index 6a5540ba3c65..70754fd65788 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1859,12 +1859,14 @@ static int validate_page_before_insert(struct page *page)
>  static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
>                  unsigned long addr, struct page *page, pgprot_t prot)
>  {
> +        struct folio *folio = page_folio(page);
> +
>          if (!pte_none(ptep_get(pte)))
>                  return -EBUSY;
>          /* Ok, finally just insert the thing.. */
> -        get_page(page);
> +        folio_get(folio);
>          inc_mm_counter(vma->vm_mm, mm_counter_file(page));
> -        page_add_file_rmap(page, vma, false);
> +        folio_add_file_rmap_pte(folio, page, vma);
>          set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
>          return 0;
>  }
> @@ -4409,6 +4411,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
>  
>  vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>  {
> +        struct folio *folio = page_folio(page);
>          struct vm_area_struct *vma = vmf->vma;
>          bool write = vmf->flags & FAULT_FLAG_WRITE;
>          unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
> @@ -4418,8 +4421,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>          if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
>                  return ret;
>  
> -        page = compound_head(page);
> -        if (compound_order(page) != HPAGE_PMD_ORDER)
> +        if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
>                  return ret;
>  
>          /*
> @@ -4428,7 +4430,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>           * check. This kind of THP just can be PTE mapped. Access to
>           * the corrupted subpage should trigger SIGBUS as expected.
>           */
> -        if (unlikely(PageHasHWPoisoned(page)))
> +        if (unlikely(folio_test_has_hwpoisoned(folio)))
>                  return ret;
>  
>          /*
> @@ -4452,7 +4454,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>          entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
>  
>          add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
> -        page_add_file_rmap(page, vma, true);
> +        folio_add_file_rmap_pmd(folio, page, vma);
>  
>          /*
>           * deposit and withdraw with pmd lock held
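For context, the interface change under review replaces page_add_file_rmap(), whose third argument selected the mapping level (false for a PTE-level mapping, true for a PMD-level one), with per-level helpers whose names encode the granularity and which take the folio alongside the first mapped page. The fragment below is an illustrative sketch only, not part of the patch, and omits the surrounding function bodies:

    /*
     * Illustrative sketch only -- not from the patch. Shows how the old
     * page_add_file_rmap() call sites in mm/memory.c correspond to the new
     * folio rmap helpers used in the diff above.
     */

    /* Old interface: one function, mapping level chosen by a boolean. */
    page_add_file_rmap(page, vma, false);         /* map one page at PTE level */
    page_add_file_rmap(page, vma, true);          /* map a THP at PMD level    */

    /* New interface: level encoded in the name, folio passed explicitly. */
    folio = page_folio(page);
    folio_add_file_rmap_pte(folio, page, vma);    /* PTE-level mapping of page */
    folio_add_file_rmap_pmd(folio, page, vma);    /* PMD-level mapping of page */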
diff --git a/mm/memory.c b/mm/memory.c
index 6a5540ba3c65..70754fd65788 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1859,12 +1859,14 @@ static int validate_page_before_insert(struct page *page)
 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
                 unsigned long addr, struct page *page, pgprot_t prot)
 {
+        struct folio *folio = page_folio(page);
+
         if (!pte_none(ptep_get(pte)))
                 return -EBUSY;
         /* Ok, finally just insert the thing.. */
-        get_page(page);
+        folio_get(folio);
         inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-        page_add_file_rmap(page, vma, false);
+        folio_add_file_rmap_pte(folio, page, vma);
         set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
         return 0;
 }
@@ -4409,6 +4411,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 {
+        struct folio *folio = page_folio(page);
         struct vm_area_struct *vma = vmf->vma;
         bool write = vmf->flags & FAULT_FLAG_WRITE;
         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
@@ -4418,8 +4421,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
         if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
                 return ret;
 
-        page = compound_head(page);
-        if (compound_order(page) != HPAGE_PMD_ORDER)
+        if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
                 return ret;
 
         /*
@@ -4428,7 +4430,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
          * check. This kind of THP just can be PTE mapped. Access to
          * the corrupted subpage should trigger SIGBUS as expected.
          */
-        if (unlikely(PageHasHWPoisoned(page)))
+        if (unlikely(folio_test_has_hwpoisoned(folio)))
                 return ret;
 
         /*
@@ -4452,7 +4454,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
         add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
-        page_add_file_rmap(page, vma, true);
+        folio_add_file_rmap_pmd(folio, page, vma);
 
         /*
          * deposit and withdraw with pmd lock held
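One detail worth noting in the do_set_pmd() hunk: the old code first resolved the head page with compound_head() and then checked compound_order(), while the new check requires the caller to already pass the head page of a PMD-sized folio and otherwise falls back. The helper below is hypothetical, written only to spell out that condition; can_map_pmd_sized() does not exist in the kernel:

    /*
     * Hypothetical helper, for illustration only (not part of the patch or
     * the kernel): the condition do_set_pmd() now uses before mapping @page
     * with a PMD entry.
     */
    static bool can_map_pmd_sized(struct folio *folio, struct page *page)
    {
            /* @page must be the folio's head page ... */
            if (page != &folio->page)
                    return false;
            /* ... and the folio must be exactly PMD-sized (e.g. 2 MiB on x86-64). */
            return folio_order(folio) == HPAGE_PMD_ORDER;
    }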