diff --git a/fs/afs/file.c b/fs/afs/file.c
@@ -593,6 +593,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
 	if (PagePrivate(page))
 		afs_invalidate_dirty(page, offset, length);
 
+	wait_on_page_fscache(page);
 	_leave("");
 }
 
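For readers without the tree at hand: wait_on_page_fscache() blocks until the cache has finished with the page, so invalidation cannot complete while the cache is still reading from it. A rough sketch of the helper as the same series defines it (include/linux/netfs.h in the 5.13-era tree; PG_fscache is an alias of PG_private_2, and the real helper is split across netfs.h and mm/filemap.c):

	/* Rough sketch, not the verbatim kernel definition. */
	static inline void wait_on_page_fscache(struct page *page)
	{
		while (PagePrivate2(page))
			wait_on_page_bit(page, PG_fscache);
	}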
@@ -610,6 +611,14 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
 
 	/* deny if page is being written to the cache and the caller hasn't
 	 * elected to wait */
+#ifdef CONFIG_AFS_FSCACHE
+	if (PageFsCache(page)) {
+		if (!(gfp_flags & __GFP_DIRECT_RECLAIM) || !(gfp_flags & __GFP_FS))
+			return false;
+		wait_on_page_fscache(page);
+	}
+#endif
+
 	if (PagePrivate(page)) {
 		detach_page_private(page);
 		trace_afs_page_dirty(vnode, tracepoint_string("rel"), page);
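The gfp gating above says: if the reclaim context may not sleep (__GFP_DIRECT_RECLAIM clear) or may not re-enter filesystem code (__GFP_FS clear), the page cannot be released yet; otherwise it is safe to block on the cache write. An equivalent spelling of the same test using the gfpflags_allow_blocking() helper from include/linux/gfp.h:

	/* Equivalent to the open-coded check in the hunk above. */
	if (!gfpflags_allow_blocking(gfp_flags) || !(gfp_flags & __GFP_FS))
		return false;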
diff --git a/fs/afs/write.c b/fs/afs/write.c
@@ -117,6 +117,10 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 		SetPageUptodate(page);
 	}
 
+#ifdef CONFIG_AFS_FSCACHE
+	wait_on_page_fscache(page);
+#endif
+
 try_again:
 	/* See if this page is already partially written in a way that we can
 	 * merge the new write with.
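The wait in afs_write_begin() pairs with the cache's write-out side: the cache sets PG_fscache before it starts copying the page's contents to the backing store and clears the bit on completion, so letting a write modify the page while the bit is set could corrupt the cached copy. A simplified sketch of that producer side (example_write_page_to_cache() is a hypothetical name; the bit helpers are the ones added in the same series in mm/filemap.c):

	/* Hypothetical illustration of the cache's side of the handshake. */
	static void example_write_page_to_cache(struct page *page)
	{
		SetPagePrivate2(page);		/* i.e. SetPageFsCache(page) */
		/* ...write the page to the cache backend here (shown
		 * synchronously for simplicity; real I/O completes async)... */
		end_page_private_2(page);	/* clears the bit, wakes waiters */
	}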
@@ -857,6 +861,11 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 	/* Wait for the page to be written to the cache before we allow it to
 	 * be modified. We then assume the entire page will need writing back.
 	 */
+#ifdef CONFIG_AFS_FSCACHE
+	if (PageFsCache(vmf->page) &&
+	    wait_on_page_bit_killable(vmf->page, PG_fscache) < 0)
+		return VM_FAULT_RETRY;
+#endif
 
 	if (wait_on_page_writeback_killable(vmf->page))
 		return VM_FAULT_RETRY;
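Note the contrast with afs_write_begin() above: in the fault path the wait is killable, and a fatal signal makes the fault back out with VM_FAULT_RETRY rather than leaving the task stuck uninterruptibly. The open-coded wait is roughly what the wait_on_page_fscache_killable() helper from the same series wraps up (rough sketch; the real helper re-checks the bit in a loop):

	static inline int wait_on_page_fscache_killable(struct page *page)
	{
		if (PagePrivate2(page))
			return wait_on_page_bit_killable(page, PG_fscache);
		return 0;	/* 0 on success, -EINTR on fatal signal */
	}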
@@ -947,5 +956,6 @@ int afs_launder_page(struct page *page)
 		detach_page_private(page);
 		trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
 	}
+	wait_on_page_fscache(page);
 	return ret;
 }
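For orientation, the VM drives these hooks through afs's address_space operations, which is why each of them now waits on PG_fscache before letting the page be modified or torn down. Condensed from fs/afs/file.c of that era (other ops omitted; treat this as a sketch, not the full table):

	const struct address_space_operations afs_fs_aops = {
		.launder_page	= afs_launder_page,
		.releasepage	= afs_releasepage,
		.invalidatepage	= afs_invalidatepage,
		.write_begin	= afs_write_begin,
		/* ... */
	};

afs_page_mkwrite() is reached separately, via the .page_mkwrite hook in afs's vm_operations_struct.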