@@ -858,6 +858,7 @@ enum ldlm_match_flags {
LDLM_MATCH_UNREF = BIT(0),
LDLM_MATCH_AST = BIT(1),
LDLM_MATCH_AST_ANY = BIT(2),
+ LDLM_MATCH_RIGHT = BIT(3),
};
/**
@@ -186,6 +186,7 @@ struct osc_thread_info {
*/
pgoff_t oti_next_index;
pgoff_t oti_fn_index; /* first non-overlapped index */
+ pgoff_t oti_ng_index; /* negative lock caching: no lock covers pages below this index */
struct cl_sync_io oti_anchor;
struct cl_req_attr oti_req_attr;
struct lu_buf oti_ladvise_buf;
@@ -248,6 +249,10 @@ enum osc_dap_flags {
* check ast data is present, requested to cancel cb
*/
OSC_DAP_FL_AST = BIT(2),
+ /**
+ * also match locks covering only the region to the right of the
+ * requested extent (used to find the first lock at or after an index)
+ */
+ OSC_DAP_FL_RIGHT = BIT(3),
};
/*
@@ -1093,8 +1093,9 @@ static bool lock_matches(struct ldlm_lock *lock, void *vdata)
switch (lock->l_resource->lr_type) {
case LDLM_EXTENT:
- if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
- lpol->l_extent.end < data->lmd_policy->l_extent.end)
+ if (!(data->lmd_match & LDLM_MATCH_RIGHT) &&
+ (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+ lpol->l_extent.end < data->lmd_policy->l_extent.end))
return false;
if (unlikely(match == LCK_GROUP) &&
@@ -1160,10 +1161,17 @@ static bool lock_matches(struct ldlm_lock *lock, void *vdata)
struct ldlm_lock *search_itree(struct ldlm_resource *res,
struct ldlm_match_data *data)
{
+ struct ldlm_extent ext = {
+ .start = data->lmd_policy->l_extent.start,
+ .end = data->lmd_policy->l_extent.end
+ };
int idx;
data->lmd_lock = NULL;
+ if (data->lmd_match & LDLM_MATCH_RIGHT)
+ ext.end = OBD_OBJECT_EOF;
+
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
struct ldlm_interval_tree *tree = &res->lr_itree[idx];
@@ -1173,9 +1181,7 @@ struct ldlm_lock *search_itree(struct ldlm_resource *res,
if (!(tree->lit_mode & *data->lmd_mode))
continue;
- ldlm_extent_search(&tree->lit_root,
- data->lmd_policy->l_extent.start,
- data->lmd_policy->l_extent.end,
+ ldlm_extent_search(&tree->lit_root, ext.start, ext.end,
lock_matches, data);
if (data->lmd_lock)
return data->lmd_lock;
@@ -3207,28 +3207,51 @@ static bool check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
{
struct osc_thread_info *info = osc_env_info(env);
struct osc_object *osc = cbdata;
+ struct cl_page *page = ops->ops_cl.cpl_page;
pgoff_t index;
+ bool discard = false;
index = osc_index(ops);
- if (index >= info->oti_fn_index) {
+ /* negative lock caching: pages below oti_ng_index have no covering lock */
+ if (index < info->oti_ng_index) {
+ discard = true;
+ } else if (index >= info->oti_fn_index) {
struct ldlm_lock *tmp;
- struct cl_page *page = ops->ops_cl.cpl_page;
/* refresh non-overlapped index */
tmp = osc_dlmlock_at_pgoff(env, osc, index,
- OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_AST);
+ OSC_DAP_FL_TEST_LOCK |
+ OSC_DAP_FL_AST | OSC_DAP_FL_RIGHT);
if (tmp) {
u64 end = tmp->l_policy_data.l_extent.end;
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, oti_fn_index). This is safe
- * because if tmp lock is canceled, it will discard
- * these pages.
- */
- info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
- if (end == OBD_OBJECT_EOF)
- info->oti_fn_index = CL_PAGE_EOF;
+ u64 start = tmp->l_policy_data.l_extent.start;
+
+ /* no lock covering this page */
+ if (index < cl_index(osc2cl(osc), start)) {
+ /* no lock at @index, first lock at @start */
+ info->oti_ng_index = cl_index(osc2cl(osc),
+ start);
+ discard = true;
+ } else {
+ /* Cache the first-non-overlapped index so as to
+ * skip all pages within [index, oti_fn_index).
+ * This is safe because if tmp lock is canceled,
+ * it will discard these pages.
+ */
+ info->oti_fn_index = cl_index(osc2cl(osc),
+ end + 1);
+ if (end == OBD_OBJECT_EOF)
+ info->oti_fn_index = CL_PAGE_EOF;
+ }
LDLM_LOCK_PUT(tmp);
- } else if (cl_page_own(env, io, page) == 0) {
+ } else {
+ info->oti_ng_index = CL_PAGE_EOF;
+ discard = true;
+ }
+ }
+
+ if (discard) {
+ if (cl_page_own(env, io, page) == 0) {
/* discard the page */
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
@@ -3292,6 +3315,7 @@ int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
cb = discard ? osc_discard_cb : check_and_discard_cb;
info->oti_fn_index = start;
info->oti_next_index = start;
+ info->oti_ng_index = 0;
osc_page_gang_lookup(env, io, osc,
info->oti_next_index, end, cb, osc);
@@ -1282,6 +1282,9 @@ struct ldlm_lock *osc_obj_dlmlock_at_pgoff(const struct lu_env *env,
if (dap_flags & OSC_DAP_FL_CANCELING)
match_flags |= LDLM_MATCH_UNREF;
+ if (dap_flags & OSC_DAP_FL_RIGHT)
+ match_flags |= LDLM_MATCH_RIGHT;
+
/*
* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too