Message ID: 20210512134631.4053-8-jack@suse.cz (mailing list archive)
State: New, archived
Series: fs: Hole punch vs page cache filling races
Hi Jan,
I love your patch! Yet something to improve:
[auto build test ERROR on linus/master]
[also build test ERROR on v5.13-rc1]
[cannot apply to hnaz-linux-mm/master ext4/dev fuse/for-next next-20210512]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
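For example, assuming the series was prepared on top of the base commit listed below, the base can be recorded with something along these lines (the patch count <n> is just a placeholder):

  git format-patch --cover-letter --base=88b06399c9c766c283e070b022b5ceafa4f63f19 -<n>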
url: https://github.com/0day-ci/linux/commits/Jan-Kara/fs-Hole-punch-vs-page-cache-filling-races/20210512-214713
base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 88b06399c9c766c283e070b022b5ceafa4f63f19
config: x86_64-rhel-8.3 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
# https://github.com/0day-ci/linux/commit/12e7111c8a1e839ea70ac4c8bf24677466cbe767
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Jan-Kara/fs-Hole-punch-vs-page-cache-filling-races/20210512-214713
git checkout 12e7111c8a1e839ea70ac4c8bf24677466cbe767
# save the attached .config to linux build tree
make W=1 ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
fs/f2fs/file.c: In function 'f2fs_file_write_iter':
>> fs/f2fs/file.c:4314:29: error: 'struct f2fs_inode_info' has no member named 'i_mmap_sem'
4314 | down_write(&F2FS_I(inode)->i_mmap_sem);
| ^~
fs/f2fs/file.c:4316:27: error: 'struct f2fs_inode_info' has no member named 'i_mmap_sem'
4316 | up_write(&F2FS_I(inode)->i_mmap_sem);
| ^~
vim +4314 fs/f2fs/file.c
4c8ff7095bef64 Chao Yu 2019-11-01 4223
fcc85a4d86b501 Jaegeuk Kim 2015-04-21 4224 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
fcc85a4d86b501 Jaegeuk Kim 2015-04-21 4225 {
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4226 struct file *file = iocb->ki_filp;
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4227 struct inode *inode = file_inode(file);
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4228 ssize_t ret;
fcc85a4d86b501 Jaegeuk Kim 2015-04-21 4229
126ce7214d2134 Chao Yu 2019-04-02 4230 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
126ce7214d2134 Chao Yu 2019-04-02 4231 ret = -EIO;
126ce7214d2134 Chao Yu 2019-04-02 4232 goto out;
126ce7214d2134 Chao Yu 2019-04-02 4233 }
1f227a3e215d36 Jaegeuk Kim 2017-10-23 4234
7bd2935870c050 Chao Yu 2020-02-24 4235 if (!f2fs_is_compress_backend_ready(inode)) {
7bd2935870c050 Chao Yu 2020-02-24 4236 ret = -EOPNOTSUPP;
7bd2935870c050 Chao Yu 2020-02-24 4237 goto out;
7bd2935870c050 Chao Yu 2020-02-24 4238 }
4c8ff7095bef64 Chao Yu 2019-11-01 4239
126ce7214d2134 Chao Yu 2019-04-02 4240 if (iocb->ki_flags & IOCB_NOWAIT) {
cb8434f16479b6 Goldwyn Rodrigues 2019-09-11 4241 if (!inode_trylock(inode)) {
126ce7214d2134 Chao Yu 2019-04-02 4242 ret = -EAGAIN;
126ce7214d2134 Chao Yu 2019-04-02 4243 goto out;
126ce7214d2134 Chao Yu 2019-04-02 4244 }
cb8434f16479b6 Goldwyn Rodrigues 2019-09-11 4245 } else {
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4246 inode_lock(inode);
b91050a80cec3d Hyunchul Lee 2018-03-08 4247 }
b91050a80cec3d Hyunchul Lee 2018-03-08 4248
e0fcd01510ad02 Chao Yu 2020-12-26 4249 if (unlikely(IS_IMMUTABLE(inode))) {
e0fcd01510ad02 Chao Yu 2020-12-26 4250 ret = -EPERM;
e0fcd01510ad02 Chao Yu 2020-12-26 4251 goto unlock;
e0fcd01510ad02 Chao Yu 2020-12-26 4252 }
e0fcd01510ad02 Chao Yu 2020-12-26 4253
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4254 ret = generic_write_checks(iocb, from);
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4255 if (ret > 0) {
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4256 bool preallocated = false;
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4257 size_t target_size = 0;
dc91de78e5e1d4 Jaegeuk Kim 2017-01-13 4258 int err;
dc91de78e5e1d4 Jaegeuk Kim 2017-01-13 4259
dc91de78e5e1d4 Jaegeuk Kim 2017-01-13 4260 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
dc91de78e5e1d4 Jaegeuk Kim 2017-01-13 4261 set_inode_flag(inode, FI_NO_PREALLOC);
a7de608691f766 Jaegeuk Kim 2016-11-11 4262
d5d5f0c0c9160f Chengguang Xu 2019-04-23 4263 if ((iocb->ki_flags & IOCB_NOWAIT)) {
b91050a80cec3d Hyunchul Lee 2018-03-08 4264 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
b91050a80cec3d Hyunchul Lee 2018-03-08 4265 iov_iter_count(from)) ||
b91050a80cec3d Hyunchul Lee 2018-03-08 4266 f2fs_has_inline_data(inode) ||
d5d5f0c0c9160f Chengguang Xu 2019-04-23 4267 f2fs_force_buffered_io(inode, iocb, from)) {
d5d5f0c0c9160f Chengguang Xu 2019-04-23 4268 clear_inode_flag(inode, FI_NO_PREALLOC);
b91050a80cec3d Hyunchul Lee 2018-03-08 4269 inode_unlock(inode);
126ce7214d2134 Chao Yu 2019-04-02 4270 ret = -EAGAIN;
126ce7214d2134 Chao Yu 2019-04-02 4271 goto out;
b91050a80cec3d Hyunchul Lee 2018-03-08 4272 }
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4273 goto write;
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4274 }
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4275
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4276 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4277 goto write;
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4278
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4279 if (iocb->ki_flags & IOCB_DIRECT) {
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4280 /*
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4281 * Convert inline data for Direct I/O before entering
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4282 * f2fs_direct_IO().
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4283 */
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4284 err = f2fs_convert_inline_inode(inode);
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4285 if (err)
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4286 goto out_err;
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4287 /*
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4288 * If f2fs_force_buffered_io() is true, we have to allocate
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4289 * blocks all the time, since f2fs_direct_IO will fall
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4290 * back to buffered IO.
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4291 */
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4292 if (!f2fs_force_buffered_io(inode, iocb, from) &&
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4293 allow_outplace_dio(inode, iocb, from))
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4294 goto write;
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4295 }
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4296 preallocated = true;
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4297 target_size = iocb->ki_pos + iov_iter_count(from);
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4298
dc91de78e5e1d4 Jaegeuk Kim 2017-01-13 4299 err = f2fs_preallocate_blocks(iocb, from);
a7de608691f766 Jaegeuk Kim 2016-11-11 4300 if (err) {
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4301 out_err:
28cfafb73853f0 Chao Yu 2017-11-13 4302 clear_inode_flag(inode, FI_NO_PREALLOC);
a7de608691f766 Jaegeuk Kim 2016-11-11 4303 inode_unlock(inode);
126ce7214d2134 Chao Yu 2019-04-02 4304 ret = err;
126ce7214d2134 Chao Yu 2019-04-02 4305 goto out;
a7de608691f766 Jaegeuk Kim 2016-11-11 4306 }
47501f87c61ad2 Jaegeuk Kim 2019-11-26 4307 write:
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4308 ret = __generic_file_write_iter(iocb, from);
dc91de78e5e1d4 Jaegeuk Kim 2017-01-13 4309 clear_inode_flag(inode, FI_NO_PREALLOC);
b0af6d491a6b5f Chao Yu 2017-08-02 4310
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4311 /* if we couldn't write data, we should deallocate blocks. */
a303b0ac920d80 Chao Yu 2021-04-01 4312 if (preallocated && i_size_read(inode) < target_size) {
a303b0ac920d80 Chao Yu 2021-04-01 4313 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
a303b0ac920d80 Chao Yu 2021-04-01 @4314 down_write(&F2FS_I(inode)->i_mmap_sem);
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4315 f2fs_truncate(inode);
a303b0ac920d80 Chao Yu 2021-04-01 4316 up_write(&F2FS_I(inode)->i_mmap_sem);
a303b0ac920d80 Chao Yu 2021-04-01 4317 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
a303b0ac920d80 Chao Yu 2021-04-01 4318 }
dc7a10ddee0c56 Jaegeuk Kim 2018-03-30 4319
b0af6d491a6b5f Chao Yu 2017-08-02 4320 if (ret > 0)
b0af6d491a6b5f Chao Yu 2017-08-02 4321 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
9dfa1baff76d08 Jaegeuk Kim 2016-07-13 4322 }
e0fcd01510ad02 Chao Yu 2020-12-26 4323 unlock:
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4324 inode_unlock(inode);
126ce7214d2134 Chao Yu 2019-04-02 4325 out:
126ce7214d2134 Chao Yu 2019-04-02 4326 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
126ce7214d2134 Chao Yu 2019-04-02 4327 iov_iter_count(from), ret);
e259221763a404 Christoph Hellwig 2016-04-07 4328 if (ret > 0)
e259221763a404 Christoph Hellwig 2016-04-07 4329 ret = generic_write_sync(iocb, ret);
b439b103a6c9eb Jaegeuk Kim 2016-02-03 4330 return ret;
fcc85a4d86b501 Jaegeuk Kim 2015-04-21 4331 }
fcc85a4d86b501 Jaegeuk Kim 2015-04-21 4332
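The failing lines 4314/4316 come from the hunk blamed above to commit a303b0ac920d80 (Chao Yu, 2021-04-01), which the posted conversion does not touch. A plausible fixup, sketched here only as an illustration that follows the same pattern as the rest of the patch (it is not part of the posted series), would be:

--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ ... @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		/* if we couldn't write data, we should deallocate blocks. */
 		if (preallocated && i_size_read(inode) < target_size) {
 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-			down_write(&F2FS_I(inode)->i_mmap_sem);
+			down_write(&inode->i_mapping->invalidate_lock);
 			f2fs_truncate(inode);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&inode->i_mapping->invalidate_lock);
 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		}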
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Use invalidate_lock instead of f2fs' private i_mmap_sem. The intended
purpose is exactly the same. By this conversion we fix a long standing
race between hole punching and read(2) / readahead(2) paths that can
lead to stale page cache contents.

CC: Jaegeuk Kim <jaegeuk@kernel.org>
CC: Chao Yu <yuchao0@huawei.com>
CC: linux-f2fs-devel@lists.sourceforge.net
Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/f2fs/data.c  |  4 ++--
 fs/f2fs/f2fs.h  |  1 -
 fs/f2fs/file.c  | 58 ++++++++++++++++++++++++-------------------------
 fs/f2fs/super.c |  1 -
 4 files changed, 30 insertions(+), 34 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 96f1a354f89f..f3177d03c28f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3165,12 +3165,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
 	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
 	if (to > i_size && !f2fs_verity_in_progress(inode)) {
 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-		down_write(&F2FS_I(inode)->i_mmap_sem);
+		down_write(&mapping->invalidate_lock);
 
 		truncate_pagecache(inode, i_size);
 		f2fs_truncate_blocks(inode, i_size, true);
 
-		up_write(&F2FS_I(inode)->i_mmap_sem);
+		up_write(&mapping->invalidate_lock);
 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	}
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 044878866ca3..1f887c906aaf 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -748,7 +748,6 @@ struct f2fs_inode_info {
 
 	/* avoid racing between foreground op and gc */
 	struct rw_semaphore i_gc_rwsem[2];
-	struct rw_semaphore i_mmap_sem;
 	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
 
 	int i_extra_isize;		/* size of extra space located in i_addr */
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 44a4650aea7b..6602f3c653c4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -38,10 +38,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	vm_fault_t ret;
 
-	down_read(&F2FS_I(inode)->i_mmap_sem);
 	ret = filemap_fault(vmf);
-	up_read(&F2FS_I(inode)->i_mmap_sem);
-
 	if (!ret)
 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);
@@ -102,7 +99,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
 	file_update_time(vmf->vma->vm_file);
-	down_read(&F2FS_I(inode)->i_mmap_sem);
+	down_read(&inode->i_mapping->invalidate_lock);
 	lock_page(page);
 	if (unlikely(page->mapping != inode->i_mapping ||
 			page_offset(page) > i_size_read(inode) ||
@@ -161,7 +158,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
 	trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-	up_read(&F2FS_I(inode)->i_mmap_sem);
+	up_read(&inode->i_mapping->invalidate_lock);
 
 	sb_end_pagefault(inode->i_sb);
 err:
@@ -942,7 +939,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 		}
 
 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-		down_write(&F2FS_I(inode)->i_mmap_sem);
+		down_write(&inode->i_mapping->invalidate_lock);
 
 		truncate_setsize(inode, attr->ia_size);
 
@@ -952,7 +949,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 		 * do not trim all blocks after i_size if target size is
 		 * larger than i_size.
 		 */
-		up_write(&F2FS_I(inode)->i_mmap_sem);
+		up_write(&inode->i_mapping->invalidate_lock);
 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		if (err)
 			return err;
@@ -1097,7 +1094,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
 
 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-			down_write(&F2FS_I(inode)->i_mmap_sem);
+			down_write(&mapping->invalidate_lock);
 
 			truncate_inode_pages_range(mapping, blk_start,
 							blk_end - 1);
@@ -1106,7 +1103,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
 			f2fs_unlock_op(sbi);
 
-			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&mapping->invalidate_lock);
 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		}
 	}
@@ -1341,7 +1338,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 
 	/* avoid gc operation during block exchange */
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&inode->i_mapping->invalidate_lock);
 
 	f2fs_lock_op(sbi);
 	f2fs_drop_extent_tree(inode);
@@ -1349,7 +1346,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 	ret = __exchange_data_block(inode, inode, end, start,
					nrpages - end, true);
 	f2fs_unlock_op(sbi);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&inode->i_mapping->invalidate_lock);
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	return ret;
 }
@@ -1380,13 +1377,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 		return ret;
 
 	/* write out all moved pages, if possible */
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&inode->i_mapping->invalidate_lock);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
 
 	new_size = i_size_read(inode) - len;
 	ret = f2fs_truncate_blocks(inode, new_size, true);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&inode->i_mapping->invalidate_lock);
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
 	return ret;
@@ -1486,7 +1483,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			pgoff_t end;
 
 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-			down_write(&F2FS_I(inode)->i_mmap_sem);
+			down_write(&mapping->invalidate_lock);
 
 			truncate_pagecache_range(inode,
 				(loff_t)index << PAGE_SHIFT,
@@ -1498,7 +1495,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
-				up_write(&F2FS_I(inode)->i_mmap_sem);
+				up_write(&mapping->invalidate_lock);
 				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 				goto out;
 			}
@@ -1510,7 +1507,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 
 			f2fs_put_dnode(&dn);
 			f2fs_unlock_op(sbi);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&mapping->invalidate_lock);
 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 			f2fs_balance_fs(sbi, dn.node_changed);
@@ -1545,6 +1542,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct address_space *mapping = inode->i_mapping;
 	pgoff_t nr, pg_start, pg_end, delta, idx;
 	loff_t new_size;
 	int ret = 0;
@@ -1567,14 +1565,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
 	f2fs_balance_fs(sbi, true);
 
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&mapping->invalidate_lock);
 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&mapping->invalidate_lock);
 	if (ret)
 		return ret;
 
 	/* write out all dirty pages from offset */
-	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+	ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
 	if (ret)
 		return ret;
 
@@ -1585,7 +1583,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
 	/* avoid gc operation during block exchange */
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&mapping->invalidate_lock);
 	truncate_pagecache(inode, offset);
 
 	while (!ret && idx > pg_start) {
@@ -1601,14 +1599,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 					idx + delta, nr, false);
 		f2fs_unlock_op(sbi);
 	}
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&mapping->invalidate_lock);
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 	/* write out all moved pages, if possible */
-	down_write(&F2FS_I(inode)->i_mmap_sem);
-	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+	down_write(&mapping->invalidate_lock);
+	filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&mapping->invalidate_lock);
 
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
@@ -3442,7 +3440,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 		goto out;
 
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&inode->i_mapping->invalidate_lock);
 
 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3478,7 +3476,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 	}
 
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&inode->i_mapping->invalidate_lock);
 out:
 	inode_unlock(inode);
 
@@ -3595,7 +3593,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 	}
 
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&inode->i_mapping->invalidate_lock);
 
 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3631,7 +3629,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 	}
 
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&inode->i_mapping->invalidate_lock);
 
 	if (ret >= 0) {
 		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
@@ -3751,7 +3749,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 		goto err;
 
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	down_write(&mapping->invalidate_lock);
 
 	ret = filemap_write_and_wait_range(mapping, range.start,
 			to_end ? LLONG_MAX : end_addr - 1);
@@ -3838,7 +3836,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
 				prev_block, len, range.flags);
 out:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&mapping->invalidate_lock);
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
 	inode_unlock(inode);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7d325bfaf65a..22e942aac7ad 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1187,7 +1187,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 	mutex_init(&fi->inmem_lock);
 	init_rwsem(&fi->i_gc_rwsem[READ]);
 	init_rwsem(&fi->i_gc_rwsem[WRITE]);
-	init_rwsem(&fi->i_mmap_sem);
 	init_rwsem(&fi->i_xattr_sem);
 
 	/* Will be used by directory only */
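For readers unfamiliar with the new lock, the discipline this series converts f2fs to is that paths which remove page cache and free blocks hold mapping->invalidate_lock exclusively, while page-cache-filling paths (fault, readahead) hold it shared. The following is a minimal illustrative sketch of the hole-punch side only; the function name is made up and it is not code from the patch:

/* Illustrative only: the exclusive side of the invalidate_lock protocol. */
static void example_punch_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;

	/* Exclude page-cache filling; fault/readahead take this shared. */
	down_write(&mapping->invalidate_lock);
	truncate_inode_pages_range(mapping, lstart, lend);
	/* ... free the underlying filesystem blocks here ... */
	up_write(&mapping->invalidate_lock);
}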