@@ -164,6 +164,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
 	struct inode *ino;
 	struct pnfs_layout_hdr *lo;
 	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	bool need_commit = false;
 	LIST_HEAD(free_me_list);
 
 	lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
@@ -172,16 +173,21 @@ static u32 initiate_file_draining(struct nfs_client *clp,
 
 	ino = lo->plh_inode;
 	spin_lock(&ino->i_lock);
-	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
-	    pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
-					&args->cbl_range))
+	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
 		rv = NFS4ERR_DELAY;
-	else
-		rv = NFS4ERR_NOMATCHING_LAYOUT;
+	} else if (pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
+					&args->cbl_range)) {
+		need_commit = true;
+		rv = NFS4ERR_DELAY;
+	}
+
 	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
 	spin_unlock(&ino->i_lock);
 	pnfs_free_lseg_list(&free_me_list);
 	pnfs_put_layout_hdr(lo);
+
+	if (need_commit)
+		pnfs_layoutcommit_inode(ino, false);
 	iput(ino);
 out:
 	return rv;
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -604,6 +604,9 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
 		spin_unlock(&inode->i_lock);
 		pnfs_free_lseg_list(&lseg_list);
 		pnfs_put_layout_hdr(lo);
+
+		if (ret)
+			pnfs_layoutcommit_inode(inode, false);
 		iput(inode);
 	}
 	return ret;
Expedite layout recall processing by forcing a layout commit when we
see busy segments.  Without it the layout recall might have to wait
until the VM decides to start writeback for the file, which can
introduce long delays.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/nfs/callback_proc.c | 16 +++++++++++-----
 fs/nfs/pnfs.c          |  3 +++
 2 files changed, 14 insertions(+), 5 deletions(-)
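A note for readers outside the pNFS code: the core of the change is a
"decide under the lock, act after it" pattern.  pnfs_layoutcommit_inode()
issues an RPC and can sleep, so it must not be called while holding
ino->i_lock; instead the decision is recorded in need_commit while
locked and acted on after the unlock.  Below is a minimal userspace
sketch of that control flow.  The stubs (layout_is_bulk_recall,
layout_has_busy_segs, spin_lock_stub(), layoutcommit_stub()) are
illustrative stand-ins for the kernel helpers, not the real NFS code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins only; real values live in the NFS headers. */
#define NFS4ERR_NOMATCHING_LAYOUT 10060
#define NFS4ERR_DELAY             10008

static bool layout_is_bulk_recall; /* models test_bit(NFS_LAYOUT_BULK_RECALL, ...) */
static bool layout_has_busy_segs;  /* models pnfs_mark_matching_lsegs_invalid() != 0 */

static void spin_lock_stub(void)   { /* would take ino->i_lock */ }
static void spin_unlock_stub(void) { /* would drop ino->i_lock */ }

/* Models pnfs_layoutcommit_inode(): may sleep, so it must run unlocked. */
static void layoutcommit_stub(void)
{
	printf("kicking LAYOUTCOMMIT to flush busy segments\n");
}

/*
 * The shape of initiate_file_draining() after the patch: decide under
 * the lock, remember the decision in need_commit, act after unlocking.
 */
static unsigned int drain_layout(void)
{
	unsigned int rv = NFS4ERR_NOMATCHING_LAYOUT;
	bool need_commit = false;

	spin_lock_stub();
	if (layout_is_bulk_recall) {
		rv = NFS4ERR_DELAY;   /* bulk recall already in progress */
	} else if (layout_has_busy_segs) {
		need_commit = true;   /* expedite writeback of dirty segments */
		rv = NFS4ERR_DELAY;   /* ask the server to retry the recall */
	}
	spin_unlock_stub();

	if (need_commit)
		layoutcommit_stub();  /* safe to sleep once the lock is gone */
	return rv;
}

int main(void)
{
	layout_has_busy_segs = true;
	printf("recall returned %u\n", drain_layout());
	return 0;
}

Returning NFS4ERR_DELAY tells the server to retry the CB_LAYOUTRECALL
later; the layoutcommit kicked off here makes it likely the segments
are already clean by the time that retry arrives, instead of waiting
for the VM to start writeback on its own schedule.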