@@ -95,7 +95,7 @@
#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15)
#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)
-/** discard (no writeback) on cancel */
+/** discard (no writeback (PW locks) or page retention (PR locks)) on cancel */
#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL /* bit 16 */
#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 16)
#define ldlm_set_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 16)
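For orientation, the ldlm_{is,set,clear}_discard_data() wrappers above follow the generic LDLM flag-accessor pattern. A minimal user-space model of that pattern; the macro bodies here are illustrative assumptions, not the kernel's:

#include <assert.h>
#include <stdint.h>

/* Stand-in for struct ldlm_lock; only the flags word matters here. */
struct lock { uint64_t l_flags; };

#define TEST_FLAG(l, b)		(((l)->l_flags & (b)) != 0)
#define SET_FLAG(l, b)		((l)->l_flags |= (b))
#define CLEAR_FLAG(l, b)	((l)->l_flags &= ~(b))

#define is_discard_data(l)	TEST_FLAG((l), 1ULL << 16)
#define set_discard_data(l)	SET_FLAG((l), 1ULL << 16)
#define clear_discard_data(l)	CLEAR_FLAG((l), 1ULL << 16)

int main(void)
{
	struct lock lk = { 0 };

	set_discard_data(&lk);
	assert(is_discard_data(&lk));
	clear_discard_data(&lk);
	assert(!is_discard_data(&lk));
	return 0;
}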
@@ -96,6 +96,12 @@ enum {
LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither
* sending nor waiting for any rpcs)
*/
+ LDLM_LRU_FLAG_CLEANUP = BIT(5), /* Used when clearing the LRU; tells
+ * ldlm_prepare_lru_list() to set
+ * LDLM_FL_DISCARD_DATA on PR extent
+ * locks so we don't waste time saving
+ * pages that will be discarded
+ * momentarily */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
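A sketch of how the new bit is meant to be combined by a caller clearing the LRU; the function is a stub and the flag values other than BIT(5) are stand-ins, with the real call sites in lru_size_store() below:

#include <stdio.h>

#define BIT(nr)	(1U << (nr))

enum {
	LRU_FLAG_PASSED  = BIT(1),
	LRU_FLAG_CLEANUP = BIT(5),
};

/* Stub standing in for ldlm_cancel_lru(); just reports its flags. */
static int cancel_lru(int nr, unsigned int flags)
{
	printf("cancel up to %d locks, flags %#x\n", nr, flags);
	return 0;
}

int main(void)
{
	/* Clearing the whole LRU: pair PASSED with CLEANUP so PR extent
	 * locks get LDLM_FL_DISCARD_DATA and their pages are dropped
	 * instead of saved. */
	return cancel_lru(0, LRU_FLAG_PASSED | LRU_FLAG_CLEANUP);
}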
@@ -1360,6 +1360,10 @@ typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
* (typically before replaying locks) w/o
* sending any RPCs or waiting for any
* outstanding RPC to complete.
+ *
+ * flags & LDLM_LRU_FLAG_CLEANUP - when canceling read locks, do not check for
+ * other read locks covering the same pages,
+ * just discard those pages.
*/
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
@@ -1487,6 +1491,11 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
*/
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+ if ((flags & LDLM_LRU_FLAG_CLEANUP) &&
+ lock->l_resource->lr_type == LDLM_EXTENT &&
+ lock->l_granted_mode == LCK_PR)
+ ldlm_set_discard_data(lock);
+
/* We can't re-add to l_lru as it confuses the
* refcounting in ldlm_lock_remove_from_lru() if an AST
* arrives after we drop lr_lock below. We use l_bl_ast
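The hunk above only tags granted-PR extent locks; PW locks are left alone so dirty pages still get written back. A self-contained model of that predicate, with stand-in types and values:

#include <assert.h>
#include <stdbool.h>

enum res_type  { RES_EXTENT, RES_IBITS };	/* stand-ins */
enum lock_mode { MODE_PR, MODE_PW, MODE_EX };

struct lock {
	enum res_type  type;
	enum lock_mode granted_mode;
	bool discard_data;
};

/* Mirror of the hunk above: during LRU cleanup, only PR extent locks
 * are tagged so their pages are discarded rather than saved. */
static void maybe_mark_discard(struct lock *lk, bool cleanup)
{
	if (cleanup && lk->type == RES_EXTENT &&
	    lk->granted_mode == MODE_PR)
		lk->discard_data = true;
}

int main(void)
{
	struct lock pr = { RES_EXTENT, MODE_PR, false };
	struct lock pw = { RES_EXTENT, MODE_PW, false };

	maybe_mark_discard(&pr, true);
	maybe_mark_discard(&pw, true);
	assert(pr.discard_data);	/* PR extent lock: pages discarded */
	assert(!pw.discard_data);	/* PW lock: writeback still wanted */
	return 0;
}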
@@ -197,7 +197,8 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
/* Try to cancel all @ns_nr_unused locks. */
canceled = ldlm_cancel_lru(ns, unused, 0,
- LDLM_LRU_FLAG_PASSED);
+ LDLM_LRU_FLAG_PASSED |
+ LDLM_LRU_FLAG_CLEANUP);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
"not all requested locks are canceled, requested: %d, canceled: %d\n",
@@ -208,7 +209,8 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
} else {
tmp = ns->ns_max_unused;
ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
+ ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
+ LDLM_LRU_FLAG_CLEANUP);
ns->ns_max_unused = tmp;
}
return count;
@@ -3339,7 +3339,7 @@ static int discard_cb(const struct lu_env *env, struct cl_io *io,
* behind this being that lock cancellation cannot be delayed indefinitely).
*/
int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
- pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
+ pgoff_t start, pgoff_t end, bool discard)
{
struct osc_thread_info *info = osc_env_info(env);
struct cl_io *io = &info->oti_io;
@@ -3353,7 +3353,7 @@ int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
if (result != 0)
goto out;
- cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ cb = discard ? discard_cb : check_and_discard_cb;
info->oti_fn_index = start;
info->oti_next_index = start;
do {
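The selected callback is what actually changes behavior here: check_and_discard_cb() keeps a page that some other read lock still covers, while discard_cb() drops it unconditionally. A compilable toy model of the selection; all names and the page/lock bookkeeping are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct page { int index; bool covered_by_other_lock; };

typedef int (*page_cb)(struct page *);

/* Unconditional discard: used when the lock carries DISCARD_DATA. */
static int discard_page(struct page *pg)
{
	printf("discard page %d\n", pg->index);
	return 0;
}

/* Default PR-cancel path: retain pages another read lock still covers. */
static int check_and_discard_page(struct page *pg)
{
	if (pg->covered_by_other_lock) {
		printf("retain page %d\n", pg->index);
		return 0;
	}
	return discard_page(pg);
}

int main(void)
{
	struct page pg = { 7, true };
	bool discard = true;	/* as passed to osc_lock_discard_pages() */
	page_cb cb = discard ? discard_page : check_and_discard_page;

	return cb(&pg);
}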
@@ -670,7 +670,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
- pgoff_t start, pgoff_t end, enum cl_lock_mode mode);
+ pgoff_t start, pgoff_t end, bool discard);
typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
struct osc_page *, void *);
@@ -380,7 +380,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
}
static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
- enum cl_lock_mode mode, int discard)
+ enum cl_lock_mode mode, bool discard)
{
struct lu_env *env;
u16 refcheck;
@@ -401,7 +401,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
rc = 0;
}
- rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
+ rc2 = osc_lock_discard_pages(env, obj, start, end, discard);
if (rc == 0 && rc2 < 0)
rc = rc2;
@@ -417,10 +417,10 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
struct ldlm_lock *dlmlock,
void *data, int flag)
{
+ enum cl_lock_mode mode = CLM_READ;
struct cl_object *obj = NULL;
int result = 0;
- int discard;
- enum cl_lock_mode mode = CLM_READ;
+ bool discard;
LASSERT(flag == LDLM_CB_CANCELING);
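The assignment of the new bool falls outside this hunk's context lines. Presumably it follows the flag set during LRU preparation (or by the server), along these lines; a sketch, not the verbatim source:

	/* discard pages iff the lock was tagged before cancel;
	 * writes need CLM_WRITE flushing semantics */
	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;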
@@ -1098,7 +1098,7 @@ static void osc_lock_lockless_cancel(const struct lu_env *env,
LASSERT(!ols->ols_dlmlock);
result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
- descr->cld_mode, 0);
+ descr->cld_mode, false);
if (result)
CERROR("Pages for lockless lock %p were not purged(%d)\n",
ols, result);
@@ -462,7 +462,7 @@ int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc)
osc_cache_truncate_start(env, osc, 0, NULL);
/* Discard all caching pages */
- osc_lock_discard_pages(env, osc, 0, CL_PAGE_EOF, CLM_WRITE);
+ osc_lock_discard_pages(env, osc, 0, CL_PAGE_EOF, true);
/* Clear ast data of dlm lock. Do this after discarding all pages */
osc_object_prune(env, osc2cl(osc));
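Taken together, the call sites give the discard argument a small decision table; a compilable summary with invented names:

#include <assert.h>
#include <stdbool.h>

enum caller {
	BLOCKING_AST,		/* follows the lock's DISCARD_DATA bit */
	LOCKLESS_CANCEL,	/* false: flush and check pages normally */
	OBJ_INVALIDATE,		/* true: every cached page must go */
};

static bool want_discard(enum caller who, bool lock_has_discard_bit)
{
	switch (who) {
	case BLOCKING_AST:
		return lock_has_discard_bit;
	case LOCKLESS_CANCEL:
		return false;
	case OBJ_INVALIDATE:
		return true;
	}
	return false;
}

int main(void)
{
	assert(want_discard(BLOCKING_AST, true));
	assert(!want_discard(LOCKLESS_CANCEL, false));
	assert(want_discard(OBJ_INVALIDATE, false));
	return 0;
}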