diff --git a/fs/locks.c b/fs/locks.c
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -400,7 +400,7 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
/*
* As ctx->flc_lock is held, new requests cannot be added to
- * ->fl_blocked_requests, so we don't need a lock to check if it
+ * ->flc_blocked_requests, so we don't need a lock to check if it
* is empty.
*/
if (list_empty(&fl->fl_core.flc_blocked_requests))
@@ -410,7 +410,7 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
&new->fl_core.flc_blocked_requests);
list_for_each_entry(f, &new->fl_core.flc_blocked_requests,
fl_core.flc_blocked_member)
- f->fl_core.flc_blocker = new;
+ f->fl_core.flc_blocker = &new->fl_core;
spin_unlock(&blocked_lock_lock);
}
@@ -773,7 +773,7 @@ static void __locks_insert_block(struct file_lock *blocker_fl,
blocker = flc;
goto new_blocker;
}
- waiter->flc_blocker = file_lock(blocker);
+ waiter->flc_blocker = blocker;
list_add_tail(&waiter->flc_blocked_member,
&blocker->flc_blocked_requests);
@@ -996,7 +996,7 @@ static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *b
hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) {
if (posix_same_owner(flc, blocker)) {
while (flc->flc_blocker)
- flc = &flc->flc_blocker->fl_core;
+ flc = flc->flc_blocker;
return flc;
}
}
@@ -2798,9 +2798,9 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node)
/* Next member in the linked list could be itself */
tmp = list_next_entry(node, fl_core.flc_blocked_member);
- if (list_entry_is_head(tmp, &node->fl_core.flc_blocker->fl_core.flc_blocked_requests,
- fl_core.flc_blocked_member)
- || tmp == node) {
+ if (list_entry_is_head(tmp, &node->fl_core.flc_blocker->flc_blocked_requests,
+ fl_core.flc_blocked_member)
+ || tmp == node) {
return NULL;
}
@@ -2841,7 +2841,7 @@ static int locks_show(struct seq_file *f, void *v)
tmp = get_next_blocked_member(cur);
/* Fall back to parent node */
while (tmp == NULL && cur->fl_core.flc_blocker != NULL) {
- cur = cur->fl_core.flc_blocker;
+ cur = file_lock(cur->fl_core.flc_blocker);
level--;
tmp = get_next_blocked_member(cur);
}
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -87,7 +87,7 @@ bool opens_in_grace(struct net *);
*/
struct file_lock_core {
- struct file_lock *flc_blocker; /* The lock that is blocking us */
+ struct file_lock_core *flc_blocker; /* The lock that is blocking us */
struct list_head flc_list; /* link into file_lock_context */
struct hlist_node flc_link; /* node in global lists */
struct list_head flc_blocked_requests; /* list of requests with
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, blocker)
+ __field(struct file_lock_core *, blocker)
__field(fl_owner_t, owner)
__field(unsigned int, pid)
__field(unsigned int, flags)
@@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, blocker)
+ __field(struct file_lock_core *, blocker)
__field(fl_owner_t, owner)
__field(unsigned int, flags)
__field(unsigned char, type)
Both locks and leases deal with fl_blocker. Switch the fl_blocker
pointer in struct file_lock_core to point to the file_lock_core of the
blocker instead of a file_lock structure.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/locks.c                      | 16 ++++++++--------
 include/linux/filelock.h        |  2 +-
 include/trace/events/filelock.h |  4 ++--
 3 files changed, 11 insertions(+), 11 deletions(-)
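
For readers following the conversion, below is a minimal, self-contained
sketch of the relationship after this patch. It is not the kernel code
itself: the struct layouts are reduced to the fields relevant here,
file_lock() is a local stand-in for the container_of-style helper the
diff already relies on in the locks_show() hunk, and blocker_of() is a
hypothetical helper added only to show the new two-step walk from a
file_lock to the file_lock that blocks it.

#include <stddef.h>

/* Simplified stand-ins for the kernel structures; layout is illustrative. */
struct file_lock_core {
	struct file_lock_core *flc_blocker;	/* core of the lock blocking us */
};

struct file_lock {
	struct file_lock_core fl_core;		/* embedded core */
	/* file_lock-specific fields (fl_start, fl_end, ...) omitted */
};

/*
 * container_of-style conversion: recover the file_lock that embeds a
 * given file_lock_core, mirroring what the file_lock() helper does in
 * the locks_show() hunk above.
 */
static inline struct file_lock *file_lock(struct file_lock_core *flc)
{
	return (struct file_lock *)((char *)flc - offsetof(struct file_lock, fl_core));
}

/*
 * With flc_blocker now pointing at a file_lock_core, code that needs the
 * full file_lock takes two steps: follow flc_blocker, then convert back
 * to the containing structure.
 */
static struct file_lock *blocker_of(struct file_lock *fl)
{
	struct file_lock_core *blocker = fl->fl_core.flc_blocker;

	return blocker ? file_lock(blocker) : NULL;
}

As the message above notes, both locks and leases deal with fl_blocker,
so pointing at the shared core presumably lets lease code follow the
blocker chain without going through a file_lock it does not otherwise
need; only callers that really want the containing file_lock pay for the
conversion.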