@@ -603,20 +603,20 @@ static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *f
}
/* Must be called with the flc_lock held! */
-static void locks_insert_global_locks(struct file_lock *fl)
+static void locks_insert_global_locks(struct file_lock_core *flc)
{
struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
percpu_rwsem_assert_held(&file_rwsem);
spin_lock(&fll->lock);
- fl->fl_core.fl_link_cpu = smp_processor_id();
- hlist_add_head(&fl->fl_core.fl_link, &fll->hlist);
+ flc->fl_link_cpu = smp_processor_id();
+ hlist_add_head(&flc->fl_link, &fll->hlist);
spin_unlock(&fll->lock);
}
/* Must be called with the flc_lock held! */
-static void locks_delete_global_locks(struct file_lock *fl)
+static void locks_delete_global_locks(struct file_lock_core *flc)
{
struct file_lock_list_struct *fll;
@@ -627,12 +627,12 @@ static void locks_delete_global_locks(struct file_lock *fl)
* is done while holding the flc_lock, and new insertions into the list
* also require that it be held.
*/
- if (hlist_unhashed(&fl->fl_core.fl_link))
+ if (hlist_unhashed(&flc->fl_link))
return;
- fll = per_cpu_ptr(&file_lock_list, fl->fl_core.fl_link_cpu);
+ fll = per_cpu_ptr(&file_lock_list, flc->fl_link_cpu);
spin_lock(&fll->lock);
- hlist_del_init(&fl->fl_core.fl_link);
+ hlist_del_init(&flc->fl_link);
spin_unlock(&fll->lock);
}
@@ -821,13 +821,13 @@ static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
list_add_tail(&fl->fl_core.fl_list, before);
- locks_insert_global_locks(fl);
+ locks_insert_global_locks(&fl->fl_core);
}
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
- locks_delete_global_locks(fl);
+ locks_delete_global_locks(&fl->fl_core);
list_del_init(&fl->fl_core.fl_list);
locks_wake_up_blocks(fl);
}
Convert these functions to take a file_lock_core instead of a file_lock.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/locks.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)