@@ -580,11 +580,6 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
error = PTR_ERR(dax.addr);
goto out;
}
-
- if (buffer_unwritten(bh) || buffer_new(bh)) {
- clear_pmem(dax.addr, PAGE_SIZE);
- wmb_pmem();
- }
dax_unmap_atomic(bdev, &dax);
error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
@@ -661,7 +656,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (error)
goto unlock_page;
- if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
+ if (!buffer_mapped(&bh) && !vmf->cow_page) {
if (vmf->flags & FAULT_FLAG_WRITE) {
error = get_block(inode, block, &bh, 1);
count_vm_event(PGMAJFAULT);
@@ -933,8 +928,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
}
if (buffer_unwritten(&bh) || buffer_new(&bh)) {
- clear_pmem(dax.addr, PMD_SIZE);
- wmb_pmem();
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
result |= VM_FAULT_MAJOR;
When a fault to a hole races with a write filling the hole, it can happen
that block zeroing in __dax_fault() overwrites the data copied by the
write. Since the filesystem is supposed to provide pre-zeroed blocks for
faults anyway, just remove the racy zeroing from the DAX code. The only
catch is with read faults over an unwritten block, where __dax_fault()
filled the block into the page tables anyway. For that case we now have
to fall back to using the hole page.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/dax.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)
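
Note for reviewers (not part of the commit message): below is a minimal,
simplified sketch of how the __dax_fault() branch touched by the second
hunk reads after this change. Only the condition and the get_block() call
come from the hunk above; the dax_load_hole() fallback, the "major"
variable and the error handling are assumptions based on fs/dax.c of this
period, shown here purely for illustration.

	if (!buffer_mapped(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			/*
			 * Write fault into a hole: allocate the block. The
			 * filesystem is expected to hand back a pre-zeroed
			 * block, so no clear_pmem()/wmb_pmem() is needed in
			 * the fault path anymore.
			 */
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (error)
				goto unlock_page;
		} else {
			/*
			 * Read fault over a hole, and now also over an
			 * unwritten block: fall back to the hole page
			 * instead of zeroing pmem here. (dax_load_hole()
			 * as in mainline fs/dax.c of this era; assumed,
			 * not part of this patch.)
			 */
			return dax_load_hole(mapping, page, vmf);
		}
	}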