@@ -736,7 +736,7 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
unsigned long pmd_addr = address & PMD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE;
struct block_device *bdev;
- pgoff_t size, pgoff;
+ pgoff_t size;
sector_t block;
int error, result = 0;
bool alloc = false;
@@ -761,12 +761,11 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
return VM_FAULT_FALLBACK;
}

- pgoff = linear_page_index(vma, pmd_addr);
size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (pgoff >= size)
+ if (vmf->pgoff >= size)
return VM_FAULT_SIGBUS;
/* If the PMD would cover blocks out of the file */
- if ((pgoff | PG_PMD_COLOUR) >= size) {
+ if ((vmf->pgoff | PG_PMD_COLOUR) >= size) {
dax_pmd_dbg(NULL, address,
"offset + huge page size > file size");
return VM_FAULT_FALLBACK;
@@ -774,7 +773,7 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
memset(&bh, 0, sizeof(bh));
bh.b_bdev = inode->i_sb->s_bdev;
- block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
+ block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
bh.b_size = PMD_SIZE;
@@ -804,7 +803,7 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
* zero pages covering this hole
*/
if (alloc) {
- loff_t lstart = pgoff << PAGE_SHIFT;
+ loff_t lstart = vmf->pgoff << PAGE_SHIFT;
loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

truncate_pagecache_range(inode, lstart, lend);
@@ -890,8 +889,8 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
* the write to insert a dirty entry.
*/
if (write) {
- error = dax_radix_entry(mapping, pgoff, dax.sector,
- true, true);
+ error = dax_radix_entry(mapping, vmf->pgoff,
+ dax.sector, true, true);
if (error) {
dax_pmd_dbg(&bh, address,
"PMD radix insertion failed");
@@ -942,7 +941,7 @@ static int dax_pud_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
unsigned long pud_addr = address & PUD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE;
struct block_device *bdev;
- pgoff_t size, pgoff;
+ pgoff_t size;
sector_t block;
int result = 0;
bool alloc = false;
@@ -967,12 +966,11 @@ static int dax_pud_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
return VM_FAULT_FALLBACK;
}

- pgoff = linear_page_index(vma, pud_addr);
size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (pgoff >= size)
+ if (vmf->pgoff >= size)
return VM_FAULT_SIGBUS;
/* If the PUD would cover blocks out of the file */
- if ((pgoff | PG_PUD_COLOUR) >= size) {
+ if ((vmf->pgoff | PG_PUD_COLOUR) >= size) {
dax_pud_dbg(NULL, address,
"offset + huge page size > file size");
return VM_FAULT_FALLBACK;
@@ -980,7 +978,7 @@ static int dax_pud_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
memset(&bh, 0, sizeof(bh));
bh.b_bdev = inode->i_sb->s_bdev;
- block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
+ block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
bh.b_size = PUD_SIZE;
@@ -1010,7 +1008,7 @@ static int dax_pud_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
* zero pages covering this hole
*/
if (alloc) {
- loff_t lstart = pgoff << PAGE_SHIFT;
+ loff_t lstart = vmf->pgoff << PAGE_SHIFT;
loff_t lend = lstart + PUD_SIZE - 1; /* inclusive */

truncate_pagecache_range(inode, lstart, lend);
Now that the PMD and PUD fault handlers are passed pgoff, there's no
need for them to calculate it themselves.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
---
 fs/dax.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)
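For review, here is a standalone userspace model of the pgoff arithmetic
the handlers rely on. It is not part of the patch: the constants are
illustrative (x86-64-style 4K pages and 2M PMDs), blkbits = 9 assumes
512-byte blocks, and the macro names only mirror the kernel's. The premise
is that the generic fault code fills vmf->pgoff from the faulting address
(via linear_page_index()), so the handlers can consume it directly.

/*
 * Standalone sketch of the checks in the PMD path above; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)
/* Low bits of a page index that select a page within one PMD: 511 here. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

int main(void)
{
	uint64_t i_size = 3 * PMD_SIZE + 123;	/* example file size */
	uint64_t pgoff = 1024;			/* vmf->pgoff, 2nd PMD */
	unsigned int blkbits = 9;		/* assumed 512-byte blocks */

	/* File size rounded up to whole pages: the patch's "size" (1537). */
	uint64_t size = (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (pgoff >= size) {
		puts("fault beyond EOF -> SIGBUS");
		return 1;
	}

	/*
	 * pgoff | PG_PMD_COLOUR is the index of the *last* page the PMD
	 * would map.  With pgoff = 1536 that is 2047 >= 1537, so the
	 * handler would fall back to PTEs; 1024 | 511 = 1535 fits.
	 */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		puts("PMD would extend past EOF -> fall back");
		return 1;
	}

	/* Page index to block number, as in the patch's "block": 8192. */
	uint64_t block = pgoff << (PAGE_SHIFT - blkbits);

	/* Byte range a PMD-sized hole truncate covers, end inclusive. */
	uint64_t lstart = pgoff << PAGE_SHIFT;		/* 4194304 */
	uint64_t lend = lstart + PMD_SIZE - 1;		/* 6291455 */

	printf("size=%llu pages, block=%llu, range=[%llu, %llu]\n",
	       (unsigned long long)size, (unsigned long long)block,
	       (unsigned long long)lstart, (unsigned long long)lend);
	return 0;
}

ORing the page index with the colour mask sets its low bits, giving the
highest index the huge entry would cover, so one comparison against the
rounded-up file size rejects any PMD that would map blocks past EOF. The
PUD path is the same arithmetic with PG_PUD_COLOUR and PUD_SIZE.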