diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1207,17 +1207,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = mapping->host;
pgtable_t pgtable = NULL;
- struct page *zero_page;
+ struct folio *zero_folio;
spinlock_t *ptl;
pmd_t pmd_entry;
pfn_t pfn;
- zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+ zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
- if (unlikely(!zero_page))
+ if (unlikely(!zero_folio))
goto fallback;
- pfn = page_to_pfn_t(zero_page);
+ pfn = page_to_pfn_t(&zero_folio->page);
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
DAX_PMD | DAX_ZERO_PAGE);
@@ -1237,17 +1237,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
mm_inc_nr_ptes(vma->vm_mm);
}
- pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+ pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
pmd_entry = pmd_mkhuge(pmd_entry);
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+ trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
return VM_FAULT_NOPAGE;
fallback:
if (pgtable)
pte_free(vma->vm_mm, pgtable);
- trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
return VM_FAULT_FALLBACK;
}
#else
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -62,14 +62,14 @@ DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_PROTO(struct inode *inode, struct vm_fault *vmf,
- struct page *zero_page,
+ struct folio *zero_folio,
void *radix_entry),
- TP_ARGS(inode, vmf, zero_page, radix_entry),
+ TP_ARGS(inode, vmf, zero_folio, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
__field(unsigned long, vm_flags)
__field(unsigned long, address)
- __field(struct page *, zero_page)
+ __field(struct folio *, zero_folio)
__field(void *, radix_entry)
__field(dev_t, dev)
),
@@ -78,17 +78,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
__entry->ino = inode->i_ino;
__entry->vm_flags = vmf->vma->vm_flags;
__entry->address = vmf->address;
- __entry->zero_page = zero_page;
+ __entry->zero_folio = zero_folio;
__entry->radix_entry = radix_entry;
),
- TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
+ TP_printk("dev %d:%d ino %#lx %s address %#lx zero_folio %p "
"radix_entry %#lx",
MAJOR(__entry->dev),
MINOR(__entry->dev),
__entry->ino,
__entry->vm_flags & VM_SHARED ? "shared" : "private",
__entry->address,
- __entry->zero_page,
+ __entry->zero_folio,
(unsigned long)__entry->radix_entry
)
)
@@ -96,8 +96,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
DEFINE_EVENT(dax_pmd_load_hole_class, name, \
TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
- struct page *zero_page, void *radix_entry), \
- TP_ARGS(inode, vmf, zero_page, radix_entry))
+ struct folio *zero_folio, void *radix_entry), \
+ TP_ARGS(inode, vmf, zero_folio, radix_entry))
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
Convert from huge_zero_page to huge_zero_folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/dax.c                      | 14 +++++++-------
 include/trace/events/fs_dax.h | 16 ++++++++--------
 2 files changed, 15 insertions(+), 15 deletions(-)
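
Note for reviewers less familiar with the folio work: the conversion is mechanical.
mm_get_huge_zero_folio() hands back a struct folio *, and call sites that still
need a struct page * (here page_to_pfn_t() and mk_pmd()) take the head page via
&zero_folio->page. The sketch below is only a userspace toy with stand-in types
to illustrate that accessor pattern; struct folio, mm_get_huge_zero_folio(),
page_to_pfn_t() and mk_pmd() are of course the kernel's definitions, not these.

/* Toy stand-ins only -- NOT the kernel's struct page / struct folio. */
#include <stdio.h>

struct page { unsigned long flags; };

/* A folio embeds its head page, so &folio->page recovers a struct page *. */
struct folio { struct page page; };

static struct folio fake_huge_zero_folio;

/* Stand-in for mm_get_huge_zero_folio(): hand back the shared zero folio. */
static struct folio *get_huge_zero_folio_stub(void)
{
	return &fake_huge_zero_folio;
}

int main(void)
{
	struct folio *zero_folio = get_huge_zero_folio_stub();

	/*
	 * Before: callers held a struct page *zero_page directly.
	 * After:  derive the page from the folio only where one is still
	 *         needed, as dax_pmd_load_hole() now does for page_to_pfn_t()
	 *         and mk_pmd(); the tracepoints report the folio itself.
	 */
	struct page *zero_page = &zero_folio->page;

	printf("zero_folio %p, head page %p\n",
	       (void *)zero_folio, (void *)zero_page);
	return 0;
}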