@@ -3802,6 +3802,7 @@ int btree_readahead_hook(struct extent_buffer *eb, int err);
/* dax.c */
ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to);
ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from);
+vm_fault_t btrfs_dax_fault(struct vm_fault *vmf);
#else
static inline ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from)
{
@@ -139,7 +139,7 @@ static int btrfs_iomap_begin(struct inode *inode, loff_t pos,
iomap->addr = em->block_start + diff;
/* Check if we really need to copy data from old extent */
- if (bi && !bi->nocow && (offset || pos + length < bi->end)) {
+ if (bi && !bi->nocow && (offset || pos + length < bi->end || flags & IOMAP_FAULT)) {
iomap->type = IOMAP_DAX_COW;
if (srcblk) {
sector_t sector = (srcblk + (pos & PAGE_MASK) -
@@ -216,4 +216,15 @@ ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *iter)
}
return ret;
}
+
+vm_fault_t btrfs_dax_fault(struct vm_fault *vmf)
+{
+ vm_fault_t ret;
+ pfn_t pfn;
+ ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &pfn, NULL, &btrfs_iomap_ops);
+ if (ret & VM_FAULT_NEEDDSYNC)
+ ret = dax_finish_sync_fault(vmf, PE_SIZE_PTE, pfn);
+
+ return ret;
+}
#endif /* CONFIG_FS_DAX */
@@ -2214,15 +2214,29 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
.page_mkwrite = btrfs_page_mkwrite,
};
#ifdef CONFIG_FS_DAX
/*
 * Every DAX fault flavour (read fault, first shared-write fault, and a
 * write fault on an already-mapped PFN) funnels through the same
 * iomap-based handler.  PTE-sized faults only; no ->huge_fault.
 */
static const struct vm_operations_struct btrfs_dax_vm_ops = {
	.fault		= btrfs_dax_fault,
	.page_mkwrite	= btrfs_dax_fault,
	.pfn_mkwrite	= btrfs_dax_fault,
};
#else
/* No DAX support built in: fall back to the regular page-cache vm_ops. */
#define btrfs_dax_vm_ops	btrfs_file_vm_ops
#endif
+
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct address_space *mapping = filp->f_mapping;
+ struct inode *inode = file_inode(filp);
- if (!mapping->a_ops->readpage)
+ if (!IS_DAX(inode) && !mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(filp);
- vma->vm_ops = &btrfs_file_vm_ops;
+ if (IS_DAX(inode))
+ vma->vm_ops = &btrfs_dax_vm_ops;
+ else
+ vma->vm_ops = &btrfs_file_vm_ops;
return 0;
}