--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -378,8 +378,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return rc;
}
-static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
- struct vm_area_struct *vma, struct vm_fault *vmf)
+static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dax_dev->dev;
@@ -388,7 +387,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
pgoff_t pgoff;
pfn_t pfn;
- if (check_vma(dax_dev, vma, __func__))
+ if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
dax_region = dax_dev->region;
@@ -403,7 +402,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
return VM_FAULT_SIGBUS;
}
- pgoff = linear_page_index(vma, pmd_addr);
+ pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) {
dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
@@ -413,22 +412,23 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
+ return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
vmf->flags & FAULT_FLAG_WRITE);
}
-static int dax_dev_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int dax_dev_pmd_fault(struct vm_fault *vmf)
{
int rc;
- struct file *filp = vma->vm_file;
+ struct file *filp = vmf->vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
current->comm, (vmf->flags & FAULT_FLAG_WRITE)
- ? "write" : "read", vma->vm_start, vma->vm_end);
+ ? "write" : "read",
+ vmf->vma->vm_start, vmf->vma->vm_end);
rcu_read_lock();
- rc = __dax_dev_pmd_fault(dax_dev, vma, vmf);
+ rc = __dax_dev_pmd_fault(dax_dev, vmf);
rcu_read_unlock();
return rc;
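
The device-DAX conversion above is purely mechanical: every former use of the separate vma argument becomes vmf->vma. For illustration, a minimal handler written directly against the new prototype might look like the sketch below (not part of the patch; example_pfn() is a hypothetical stand-in for a driver-specific pgoff-to-pfn_t lookup):

/* Illustrative only -- example_pfn() is hypothetical. */
static int example_pmd_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;	/* the VMA now rides in vmf */
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	pfn_t pfn;

	/* The aligned 2M range must fit entirely inside the mapping. */
	if (pmd_addr < vma->vm_start || pmd_addr + PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pfn = example_pfn(vma->vm_file, linear_page_index(vma, pmd_addr));
	return vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
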
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1226,9 +1226,9 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
*/
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
-static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
- struct vm_fault *vmf, unsigned long address,
- struct iomap *iomap, loff_t pos, bool write, void **entryp)
+static int dax_pmd_insert_mapping(struct vm_area_struct *vma,
+ struct vm_fault *vmf, struct iomap *iomap, loff_t pos,
+ bool write, void **entryp)
{
struct address_space *mapping = vma->vm_file->f_mapping;
struct block_device *bdev = iomap->bdev;
@@ -1257,24 +1257,23 @@ static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
goto fallback;
*entryp = ret;
- trace_dax_pmd_insert_mapping(inode, vma, address, write, length,
+ trace_dax_pmd_insert_mapping(inode, vmf, write, length,
dax.pfn, ret);
- return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
+ return vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, dax.pfn, write);
unmap_fallback:
dax_unmap_atomic(bdev, &dax);
fallback:
- trace_dax_pmd_insert_mapping_fallback(inode, vma, address, write,
- length, dax.pfn, ret);
+ trace_dax_pmd_insert_mapping_fallback(inode, vmf, write, length,
+ dax.pfn, ret);
return VM_FAULT_FALLBACK;
}
-static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
- struct vm_fault *vmf, unsigned long address,
+static int dax_pmd_load_hole(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap *iomap, void **entryp)
{
struct address_space *mapping = vma->vm_file->f_mapping;
- unsigned long pmd_addr = address & PMD_MASK;
+ unsigned long pmd_addr = vmf->address & PMD_MASK;
struct inode *inode = mapping->host;
struct page *zero_page;
void *ret = NULL;
@@ -1292,27 +1291,27 @@ static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
goto fallback;
*entryp = ret;
- ptl = pmd_lock(vma->vm_mm, pmd);
- if (!pmd_none(*pmd)) {
+ ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (!pmd_none(*(vmf->pmd))) {
spin_unlock(ptl);
goto fallback;
}
pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
pmd_entry = pmd_mkhuge(pmd_entry);
- set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
+ set_pmd_at(vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vma, address, zero_page, ret);
+ trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
return VM_FAULT_NOPAGE;
fallback:
- trace_dax_pmd_load_hole_fallback(inode, vma, address, zero_page, ret);
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
return VM_FAULT_FALLBACK;
}
-int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- struct iomap_ops *ops)
+int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops)
{
+ struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
unsigned long pmd_addr = vmf->address & PMD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -1334,7 +1333,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
vmf->pgoff = linear_page_index(vma, pmd_addr);
max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
- trace_dax_pmd_fault(inode, vma, vmf, max_pgoff, 0);
+ trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
/* Fall back to PTEs if we're going to COW */
if (write && !(vma->vm_flags & VM_SHARED))
@@ -1379,15 +1378,14 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
switch (iomap.type) {
case IOMAP_MAPPED:
- result = dax_pmd_insert_mapping(vma, vmf->pmd, vmf,
- vmf->address, &iomap, pos, write, &entry);
+ result = dax_pmd_insert_mapping(vma, vmf, &iomap,
+ pos, write, &entry);
break;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (WARN_ON_ONCE(write))
goto unlock_entry;
- result = dax_pmd_load_hole(vma, vmf->pmd, vmf, vmf->address,
- &iomap, &entry);
+ result = dax_pmd_load_hole(vma, vmf, &iomap, &entry);
break;
default:
WARN_ON_ONCE(1);
@@ -1417,7 +1415,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
count_vm_event(THP_FAULT_FALLBACK);
}
out:
- trace_dax_pmd_fault_done(inode, vma, vmf, max_pgoff, result);
+ trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
vmf->pgoff = old_pgoff;
return result;
}
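
The gatekeeping at the top of dax_iomap_pmd_fault() now reads all of its inputs out of vmf as well. A condensed sketch of those checks, with locking and radix-tree handling omitted (the COW test appears in the hunk above; the VMA-bounds test is paraphrased from the function body, which the hunks do not show):

/* Condensed, illustrative sketch of the fallback gating. */
static bool example_pmd_fault_allowed(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/* COW is handled at PTE granularity, so a private writable
	 * mapping can never take the huge-page path. */
	if (write && !(vma->vm_flags & VM_SHARED))
		return false;

	/* The aligned 2M range must lie entirely inside the VMA. */
	if (pmd_addr < vma->vm_start || pmd_addr + PMD_SIZE > vma->vm_end)
		return false;

	return true;
}
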
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -279,23 +279,23 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
static int
-ext4_dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ext4_dax_pmd_fault(struct vm_fault *vmf)
{
int result;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE;
gfp_t old_mask;
if (write) {
sb_start_pagefault(sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
}
old_mask = vmf->gfp_mask;
vmf->gfp_mask &= ~__GFP_FS;
down_read(&EXT4_I(inode)->i_mmap_sem);
- result = dax_iomap_pmd_fault(vma, vmf, &ext4_iomap_ops);
+ result = dax_iomap_pmd_fault(vmf, &ext4_iomap_ops);
up_read(&EXT4_I(inode)->i_mmap_sem);
vmf->gfp_mask = old_mask;
if (write)
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1532,10 +1532,9 @@ xfs_filemap_fault(
*/
STATIC int
xfs_filemap_pmd_fault(
- struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct xfs_inode *ip = XFS_I(inode);
int ret;
gfp_t old_mask;
@@ -1547,13 +1546,13 @@ xfs_filemap_pmd_fault(
if (vmf->flags & FAULT_FLAG_WRITE) {
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
}
old_mask = vmf->gfp_mask;
vmf->gfp_mask &= ~__GFP_FS;
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = dax_iomap_pmd_fault(vma, vmf, &xfs_iomap_ops);
+ ret = dax_iomap_pmd_fault(vmf, &xfs_iomap_ops);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
vmf->gfp_mask = old_mask;
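
Both filesystem wrappers reduce to the same shape once the vma parameter is gone. A hypothetical filesystem would wire up DAX PMD faults roughly as follows; example_iomap_ops stands in for the fs-specific iomap_ops, and the per-filesystem mmap lock (i_mmap_sem in ext4, XFS_MMAPLOCK_SHARED in XFS) is omitted for brevity:

/* Generic wrapper shape, illustrative only. */
static int example_dax_pmd_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	gfp_t old_mask;
	int ret;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	/* Page faults must not recurse into filesystem reclaim. */
	old_mask = vmf->gfp_mask;
	vmf->gfp_mask &= ~__GFP_FS;

	ret = dax_iomap_pmd_fault(vmf, &example_iomap_ops);

	vmf->gfp_mask = old_mask;
	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}
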
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -71,15 +71,14 @@ static inline unsigned int dax_radix_order(void *entry)
return PMD_SHIFT - PAGE_SHIFT;
return 0;
}
-int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- struct iomap_ops *ops);
+int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops);
#else
static inline unsigned int dax_radix_order(void *entry)
{
return 0;
}
-static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf, struct iomap_ops *ops)
+static inline int dax_iomap_pmd_fault(struct vm_fault *vmf,
+ struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
}
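
The #else stub means call sites need no #ifdef of their own: with CONFIG_FS_DAX_PMD disabled, the call collapses to an immediate VM_FAULT_FALLBACK and the fault core retries with PTEs. A sketch of a caller relying on that (example_iomap_ops again a placeholder):

/* Illustrative only: compiles to VM_FAULT_FALLBACK when
 * CONFIG_FS_DAX_PMD is off, so the huge-page path vanishes. */
static int example_fs_pmd_fault(struct vm_fault *vmf)
{
	return dax_iomap_pmd_fault(vmf, &example_iomap_ops);
}
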
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -347,7 +347,7 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area);
int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- int (*pmd_fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*pmd_fault)(struct vm_fault *vmf);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
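
Note the asymmetry this patch leaves behind in the operations table: ->fault still receives the VMA explicitly while ->pmd_fault no longer does. An illustrative table with hypothetical handler names:

/* Hypothetical operations table, for illustration only. */
static const struct vm_operations_struct example_vm_ops = {
	.fault		= example_fault,	/* still (vma, vmf) after this patch */
	.pmd_fault	= example_pmd_fault,	/* now (vmf) only */
};
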
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -7,9 +7,9 @@
#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(dax_pmd_fault_class,
- TP_PROTO(struct inode *inode, struct vm_area_struct *vma,
- struct vm_fault *vmf, pgoff_t max_pgoff, int result),
- TP_ARGS(inode, vma, vmf, max_pgoff, result),
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+ pgoff_t max_pgoff, int result),
+ TP_ARGS(inode, vmf, max_pgoff, result),
TP_STRUCT__entry(
__field(unsigned long, ino)
__field(unsigned long, vm_start)
@@ -25,9 +25,9 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->vm_start = vma->vm_start;
- __entry->vm_end = vma->vm_end;
- __entry->vm_flags = vma->vm_flags;
+ __entry->vm_start = vmf->vma->vm_start;
+ __entry->vm_end = vmf->vma->vm_end;
+ __entry->vm_flags = vmf->vma->vm_flags;
__entry->address = vmf->address;
__entry->flags = vmf->flags;
__entry->pgoff = vmf->pgoff;
@@ -52,19 +52,18 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
#define DEFINE_PMD_FAULT_EVENT(name) \
DEFINE_EVENT(dax_pmd_fault_class, name, \
- TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \
- struct vm_fault *vmf, \
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
pgoff_t max_pgoff, int result), \
- TP_ARGS(inode, vma, vmf, max_pgoff, result))
+ TP_ARGS(inode, vmf, max_pgoff, result))
DEFINE_PMD_FAULT_EVENT(dax_pmd_fault);
DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
- TP_PROTO(struct inode *inode, struct vm_area_struct *vma,
- unsigned long address, struct page *zero_page,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+ struct page *zero_page,
void *radix_entry),
- TP_ARGS(inode, vma, address, zero_page, radix_entry),
+ TP_ARGS(inode, vmf, zero_page, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
__field(unsigned long, vm_flags)
@@ -76,8 +75,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->vm_flags = vma->vm_flags;
- __entry->address = address;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
__entry->zero_page = zero_page;
__entry->radix_entry = radix_entry;
),
@@ -95,19 +94,18 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
DEFINE_EVENT(dax_pmd_load_hole_class, name, \
- TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \
- unsigned long address, struct page *zero_page, \
- void *radix_entry), \
- TP_ARGS(inode, vma, address, zero_page, radix_entry))
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+ struct page *zero_page, void *radix_entry), \
+ TP_ARGS(inode, vmf, zero_page, radix_entry))
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
- TP_PROTO(struct inode *inode, struct vm_area_struct *vma,
- unsigned long address, int write, long length, pfn_t pfn,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+ int write, long length, pfn_t pfn,
void *radix_entry),
- TP_ARGS(inode, vma, address, write, length, pfn, radix_entry),
+ TP_ARGS(inode, vmf, write, length, pfn, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
__field(unsigned long, vm_flags)
@@ -121,8 +119,8 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->vm_flags = vma->vm_flags;
- __entry->address = address;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
__entry->write = write;
__entry->length = length;
__entry->pfn_val = pfn.val;
@@ -146,10 +144,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
- TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \
- unsigned long address, int write, long length, pfn_t pfn, \
- void *radix_entry), \
- TP_ARGS(inode, vma, address, write, length, pfn, radix_entry))
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+ int write, long length, pfn_t pfn, void *radix_entry), \
+ TP_ARGS(inode, vmf, write, length, pfn, radix_entry))
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
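
The net effect on callers of the reworked event classes: each tracepoint loses its separate vma/address arguments, and the TP_fast_assign blocks pull both out of vmf instead. An illustrative call site (the surrounding fault handler would supply the values):

/* Illustrative call site only. */
static void example_trace_hole(struct inode *inode, struct vm_fault *vmf,
		struct page *zero_page, void *radix_entry)
{
	/* vm_flags and the faulting address are read from vmf->vma and
	 * vmf->address inside the event class, so only vmf is passed. */
	trace_dax_pmd_load_hole(inode, vmf, zero_page, radix_entry);
}
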
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3443,11 +3443,10 @@ static int do_numa_page(struct vm_fault *vmf)
static int create_huge_pmd(struct vm_fault *vmf)
{
- struct vm_area_struct *vma = vmf->vma;
- if (vma_is_anonymous(vma))
+ if (vma_is_anonymous(vmf->vma))
return do_huge_pmd_anonymous_page(vmf);
- if (vma->vm_ops->pmd_fault)
- return vma->vm_ops->pmd_fault(vma, vmf);
+ if (vmf->vma->vm_ops->pmd_fault)
+ return vmf->vma->vm_ops->pmd_fault(vmf);
return VM_FAULT_FALLBACK;
}
@@ -3456,7 +3455,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
if (vma_is_anonymous(vmf->vma))
return do_huge_pmd_wp_page(vmf, orig_pmd);
if (vmf->vma->vm_ops->pmd_fault)
- return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf);
+ return vmf->vma->vm_ops->pmd_fault(vmf);
/* COW handled on pte level: split pmd */
VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);