
[12/16] btrfs: Perform memory faults under locked extent

Message ID 68054cb6c0be33ef7c55ef8d4b5f4dda0a0d9cbf.1668530684.git.rgoldwyn@suse.com
State New, archived
Series Lock extents before pages

Commit Message

Goldwyn Rodrigues Nov. 15, 2022, 6 p.m. UTC
As part of locking extents before pages, lock the entire memory fault region
while servicing faults.

Remove extent locking from page_mkwrite(), since it is part of the
fault.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/file.c  | 18 +++++++++++++++++-
 fs/btrfs/inode.c |  6 ------
 2 files changed, 17 insertions(+), 7 deletions(-)
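
For orientation, here is a minimal sketch of the ordering this patch aims for in
the fault path: the extent range is locked before filemap_fault() takes the page
lock. This is an illustration, not the patch itself (the function name is made
up); note that lock_extent()/unlock_extent() operate on byte offsets, so the
page index in vmf->pgoff has to be shifted to a byte range.

/*
 * Illustration only: take the extent lock around the fault so the
 * ordering becomes extent lock -> page lock.  Byte offsets are derived
 * from the page index, since lock_extent() works on byte ranges.
 */
static vm_fault_t example_btrfs_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start = (u64)vmf->pgoff << PAGE_SHIFT;	/* page index -> bytes */
	u64 end = start + PAGE_SIZE - 1;
	vm_fault_t ret;

	lock_extent(io_tree, start, end, &cached_state);
	ret = filemap_fault(vmf);	/* takes the page lock internally */
	unlock_extent(io_tree, start, end, &cached_state);

	return ret;
}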

Comments

Josef Bacik Dec. 13, 2022, 7:12 p.m. UTC | #1
On Tue, Nov 15, 2022 at 12:00:30PM -0600, Goldwyn Rodrigues wrote:
> As part of locking extents before pages, lock the entire memory fault region
> while servicing faults.
> 

Again this is going to slow us down for the same reason the readpages change
slowed us down.  If we have the unlocked variant of readahead then we get this
for free, because filemap_fault will just use that, and we'll already have our
locking order done.

> Remove extent locking from page_mkwrite(), since it is part of the
> fault.
> 

It's actually not, it happens separately: we do the fault, come back into the
main fault logic, unlock the page, and call ->page_mkwrite().  So for
page_mkwrite we definitely need to keep the locking; we just need to change the
order, and that should be in its own patch.  Thanks,

Josef
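
For reference, a hand-written sketch of the ordering Josef describes; it is a
simplified approximation of the core mm shared write-fault path (not the actual
mm/memory.c code, and the function name is invented), showing that ->fault()
and ->page_mkwrite() run as two separate steps with the page unlocked in
between:

/*
 * Simplified sketch of a write fault on a shared mapping (not verbatim
 * kernel code): ->fault() and ->page_mkwrite() are separate calls, so
 * page_mkwrite() cannot inherit an extent lock taken during the fault.
 */
static vm_fault_t sketch_shared_write_fault(struct vm_fault *vmf)
{
	const struct vm_operations_struct *ops = vmf->vma->vm_ops;
	vm_fault_t ret;

	/* Step 1: read the page in; it is returned locked.  With this
	 * series, btrfs_fault() takes and drops the extent lock entirely
	 * within this call. */
	ret = ops->fault(vmf);
	if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))
		return ret;

	/* Step 2: the generic fault code unlocks the page and then calls
	 * ->page_mkwrite() separately, so it must take (and order) its
	 * own locks. */
	unlock_page(vmf->page);
	ret = ops->page_mkwrite(vmf);
	return ret;
}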

Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 559214ded4eb..876e0d81b191 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1980,8 +1980,24 @@  int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	goto out;
 }
 
+static vm_fault_t btrfs_fault(struct vm_fault *vmf)
+{
+	struct extent_state *cached_state = NULL;
+	struct inode *inode = file_inode(vmf->vma->vm_file);
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	u64 page_start = (u64)vmf->pgoff << PAGE_SHIFT;
+	u64 page_end = page_start + PAGE_SIZE - 1;
+	vm_fault_t ret;
+
+	lock_extent(io_tree, page_start, page_end, &cached_state);
+	ret = filemap_fault(vmf);
+	unlock_extent(io_tree, page_start, page_end, &cached_state);
+
+	return ret;
+}
+
 static const struct vm_operations_struct btrfs_file_vm_ops = {
-	.fault		= filemap_fault,
+	.fault		= btrfs_fault,
 	.map_pages	= filemap_map_pages,
 	.page_mkwrite	= btrfs_page_mkwrite,
 };
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 070fb7071e39..b421260d52e2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8530,7 +8530,6 @@  vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	struct page *page = vmf->page;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
@@ -8585,11 +8584,9 @@  vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent(io_tree, page_start, page_end, &cached_state);
 	ret2 = set_page_extent_mapped(page);
 	if (ret2 < 0) {
 		ret = vmf_error(ret2);
-		unlock_extent(io_tree, page_start, page_end, &cached_state);
 		goto out_unlock;
 	}
 
@@ -8600,7 +8597,6 @@  vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
 			PAGE_SIZE);
 	if (ordered) {
-		unlock_extent(io_tree, page_start, page_end, &cached_state);
 		unlock_page(page);
 		up_read(&BTRFS_I(inode)->i_mmap_lock);
 		btrfs_start_ordered_extent(ordered, 1);
@@ -8633,7 +8629,6 @@  vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
 					&cached_state);
 	if (ret2) {
-		unlock_extent(io_tree, page_start, page_end, &cached_state);
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
@@ -8653,7 +8648,6 @@  vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 
 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
 
-	unlock_extent(io_tree, page_start, page_end, &cached_state);
 	up_read(&BTRFS_I(inode)->i_mmap_lock);
 
 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);