--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -385,12 +385,11 @@ static void nfs_readpage_from_fscache_complete(struct page *page,
 		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
 		 page, context, error);
 
-	/* if the read completes with an error, we just unlock the page and let
+	/* if the read completes with an error, unlock the page and let
 	 * the VM reissue the readpage */
-	if (!error) {
+	if (!error)
 		SetPageUptodate(page);
-		unlock_page(page);
-	}
+	unlock_page(page);
 }
 
 /*
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -362,13 +362,13 @@ int nfs_readpage(struct file *file, struct page *page)
 	} else
 		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
 
+	xchg(&desc.ctx->error, 0);
 	if (!IS_SYNC(inode)) {
 		ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
 		if (ret == 0)
-			goto out;
+			goto out_wait;
 	}
 
-	xchg(&desc.ctx->error, 0);
 	nfs_pageio_init_read(&desc.pgio, inode, false,
 			     &nfs_async_read_completion_ops);
 
@@ -378,6 +378,7 @@ int nfs_readpage(struct file *file, struct page *page)
 	nfs_pageio_complete_read(&desc.pgio);
 	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
+out_wait:
 	if (!ret) {
 		ret = wait_on_page_locked_killable(page);
 		if (!PageUptodate(page) && !ret)
 			ret = xchg(&desc.ctx->error, 0);
Earlier commits refactored some NFS read code and removed
nfs_readpage_async(), but neglected to properly fix up
nfs_readpage_from_fscache_complete().  The code path is only hit when
something unusual occurs with the cachefiles backing filesystem, such
as an IO error or while a cookie is being invalidated.

Fix up this path by unconditionally unlocking the page and letting the
VM decide what to do based on PG_uptodate.  Note that the VM re-issue
of the IO will likely go back to fscache, so we may end up failing the
read if fscache has a permanent error (such as EIO).

Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
---
 fs/nfs/fscache.c | 7 +++----
 fs/nfs/read.c    | 5 +++--
 2 files changed, 6 insertions(+), 6 deletions(-)
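
For reference, here is how nfs_readpage_from_fscache_complete() reads
with this patch applied. This is a reconstruction from the hunks above,
not a verbatim copy of the tree:

static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* Mark the page uptodate only on a successful cache read; the
	 * page is unlocked unconditionally, so on error the VM finds
	 * it unlocked but not PG_uptodate and reissues the readpage. */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

On the nfs_readpage() side, resetting ctx->error before the fscache
attempt and routing a successful fscache submission through the new
out_wait label mean the caller always waits on the page lock and picks
up any error the completion path recorded, whether the read was served
from the cache or from the server.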