@@ -611,7 +611,8 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
bch2_folio_reservation_init(c, inode, &res);
- sb_start_pagefault(inode->v.i_sb);
+ if (sb_start_pagefault(inode->v.i_sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(file);
/*
@@ -1900,7 +1900,8 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
reserved_space = PAGE_SIZE;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
page_start = page_offset(page);
page_end = page_start + PAGE_SIZE - 1;
end = page_end;
@@ -1686,7 +1686,9 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
if (!prealloc_cf)
return VM_FAULT_OOM;
- sb_start_pagefault(inode->i_sb);
+ err = sb_start_pagefault(inode->i_sb);
+ if (err)
+ goto out_free;
ceph_block_sigs(&oldset);
if (off + thp_size(page) <= size)
@@ -1704,7 +1706,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
got = 0;
err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
if (err < 0)
- goto out_free;
+ goto out_sigs;
doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
off, len, ceph_cap_string(got));
@@ -1758,9 +1760,10 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
ceph_put_cap_refs_async(ci, got);
-out_free:
+out_sigs:
ceph_restore_sigs(&oldset);
sb_end_pagefault(inode->i_sb);
+out_free:
ceph_free_cap_flush(prealloc_cf);
if (err < 0)
ret = vmf_error(err);
@@ -98,7 +98,8 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
(vmf->vma->vm_flags & VM_SHARED);
if (write) {
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(vmf->vma->vm_file);
}
filemap_invalidate_lock_shared(inode->i_mapping);
@@ -725,7 +725,8 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
pfn_t pfn;
if (write) {
- sb_start_pagefault(sb);
+ if (sb_start_pagefault(sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(mapping);
retry:
@@ -6128,7 +6128,8 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
- sb_start_pagefault(inode->i_sb);
+ if (unlikely(sb_start_pagefault(inode->i_sb) < 0))
+ return VM_FAULT_SIGBUS;
file_update_time(vma->vm_file);
filemap_invalidate_lock_shared(mapping);
@@ -100,7 +100,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
if (need_alloc)
f2fs_balance_fs(sbi, true);
- sb_start_pagefault(inode->i_sb);
+ err = sb_start_pagefault(inode->i_sb);
+ if (err)
+ goto out;
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -797,7 +797,9 @@ static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
 	bool retry = false;

-	if (write)
-		sb_start_pagefault(sb);
+	if (write) {
+		if (sb_start_pagefault(sb) < 0)
+			return VM_FAULT_SIGBUS;
+	}
retry:
if (retry && !(fcd->nr_free_ranges > 0))
wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
@@ -427,7 +427,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
loff_t size;
int err;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
err = gfs2_glock_nq(&gh);
@@ -531,7 +531,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
_enter("%lx", folio->index);
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
if (folio_lock_killable(folio) < 0)
goto out;
@@ -593,7 +593,8 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
filp, filp->f_mapping->host->i_ino,
(long long)folio_pos(folio));
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
/* make sure the cache has finished storing the page */
if (folio_test_private_2(folio) && /* [DEPRECATED] */
@@ -54,7 +54,8 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
return VM_FAULT_SIGBUS; /* -ENOSPC */
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
folio_lock(folio);
if (folio->mapping != inode->i_mapping ||
folio_pos(folio) >= i_size_read(inode) ||
@@ -119,7 +119,8 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
int err;
vm_fault_t ret;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
ocfs2_block_signals(&oldset);
/*
@@ -632,7 +632,8 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
vm_fault_t ret;
struct orangefs_write_range *wr;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
ret = VM_FAULT_RETRY;
@@ -45,7 +45,8 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
vm_fault_t ret = VM_FAULT_LOCKED;
int err;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(vma->vm_file);
filemap_invalidate_lock_shared(mapping);
folio_lock(folio);
@@ -1283,7 +1283,8 @@ xfs_write_fault(
unsigned int lock_mode = XFS_MMAPLOCK_SHARED;
vm_fault_t ret;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(vmf->vma->vm_file);
/*
@@ -294,7 +294,8 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
if (zonefs_inode_is_seq(inode))
return VM_FAULT_NOPAGE;
- sb_start_pagefault(inode->i_sb);
+ if (sb_start_pagefault(inode->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(vmf->vma->vm_file);
/* Serialize against truncates */
@@ -1858,9 +1858,14 @@ static inline bool __must_check sb_start_write_trylock(struct super_block *sb)
 * mmap_lock
 * -> sb_start_pagefault
- */
+ *
+ * Return: 0 on success, -EROFS if the filesystem has been shut down.
+ */
-static inline void sb_start_pagefault(struct super_block *sb)
+static inline int __must_check sb_start_pagefault(struct super_block *sb)
 {
+	if (sb_test_iflag(sb, SB_I_SHUTDOWN))
+		return -EROFS;
 	__sb_start_write(sb, SB_FREEZE_PAGEFAULT);
+	return 0;
 }
/**
@@ -3672,7 +3672,8 @@ vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
struct folio *folio = page_folio(vmf->page);
vm_fault_t ret = VM_FAULT_LOCKED;
- sb_start_pagefault(mapping->host->i_sb);
+ if (sb_start_pagefault(mapping->host->i_sb) < 0)
+ return VM_FAULT_SIGBUS;
file_update_time(vmf->vma->vm_file);
folio_lock(folio);
if (folio->mapping != mapping) {
Similarly to sb_start_write(), make sb_start_pagefault() return an error
for superblocks which are marked as shutdown. This avoids modifications
to a shut-down filesystem, reduces noise in the error logs, and
generally makes life somewhat easier for filesystems. We teach all
sb_start_pagefault() callers to handle the error.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/bcachefs/fs-io-pagecache.c | 3 ++-
 fs/btrfs/file.c               | 3 ++-
 fs/ceph/addr.c                | 9 ++++++---
 fs/ext2/file.c                | 3 ++-
 fs/ext4/file.c                | 3 ++-
 fs/ext4/inode.c               | 3 ++-
 fs/f2fs/file.c                | 4 +++-
 fs/fuse/dax.c                 | 3 ++-
 fs/gfs2/file.c                | 3 ++-
 fs/netfs/buffered_write.c     | 3 ++-
 fs/nfs/file.c                 | 3 ++-
 fs/nilfs2/file.c              | 3 ++-
 fs/ocfs2/mmap.c               | 3 ++-
 fs/orangefs/inode.c           | 3 ++-
 fs/udf/file.c                 | 3 ++-
 fs/xfs/xfs_file.c             | 3 ++-
 fs/zonefs/file.c              | 3 ++-
 include/linux/fs.h            | 5 ++++-
 mm/filemap.c                  | 3 ++-
 19 files changed, 45 insertions(+), 21 deletions(-)