@@ -91,6 +91,7 @@
#include <linux/uio.h>
#include <lu_object.h>
#include <linux/atomic.h>
+#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
@@ -1071,6 +1072,14 @@ static inline bool __page_in_use(const struct cl_page *page, int refc)
*/
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+/* Return true if @vmpage has more non-mapping references than the known
+ * holders: cl_page + page cache (= 2) + @refcount caller refs (0 or 1 now)
+ */
+static inline int vmpage_in_use(struct page *vmpage, int refcount)
+{
+	return (page_count(vmpage) - page_mapcount(vmpage) > 2 + refcount);
+}
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
@@ -102,7 +102,7 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
struct lu_env *env;
struct cl_object *obj;
- struct cl_page *page;
+ struct cl_page *clpage;
struct address_space *mapping;
int result = 0;
@@ -118,16 +118,23 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
if (!obj)
return 1;
- page = cl_vmpage_page(vmpage, obj);
- if (!page)
+ clpage = cl_vmpage_page(vmpage, obj);
+ if (!clpage)
return 1;
env = cl_env_percpu_get();
LASSERT(!IS_ERR(env));
- if (!cl_page_in_use(page)) {
+ /* we must not delete the cl_page if the vmpage is in use, otherwise we
+ * disconnect the vmpage from Lustre while it's still alive(!), which
+ * means we won't find it to discard on lock cancellation.
+ *
+ * References here are: caller + cl_page + page cache.
+ * Any other references are potentially transient and must be ignored.
+ */
+ if (!cl_page_in_use(clpage) && !vmpage_in_use(vmpage, 1)) {
result = 1;
- cl_page_delete(env, page);
+ cl_page_delete(env, clpage);
}
/* To use percpu env array, the call path can not be rescheduled;
@@ -144,7 +151,7 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
* that we won't get into object delete path.
*/
LASSERT(cl_object_refc(obj) > 1);
- cl_page_put(env, page);
+ cl_page_put(env, clpage);
cl_env_percpu_put(env);
return result;
@@ -539,8 +539,13 @@ static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
if (cli->cl_cache->ccc_unstable_check) {
struct page *vmpage = cl_page_vmpage(page);
- /* vmpage have two known users: cl_page and VM page cache */
- if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+ /* this check is racy because the vmpage is not locked, but
+ * that's OK - the code which does the actual page release
+ * checks this again before releasing
+ *
+	 * vmpage has two known users: cl_page and VM page cache
+ */
+ if (vmpage_in_use(vmpage, 0))
return true;
}
return false;