@@ -350,7 +350,8 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
* access and not reliable.
*/
static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+ bool use_ww_ctx, struct ww_acquire_ctx *ww_ctx)
{
bool ret = true;
 
@@ -373,6 +374,28 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
break;
}
 
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
+ struct ww_mutex *ww;
+
+ ww = container_of(lock, struct ww_mutex, base);
+
+ /*
+ * If ww->ctx is set, its contents are undefined; only
+ * by acquiring wait_lock is there a guarantee that
+ * they are not invalid when read.
+ *
+ * As such, when deadlock detection needs to be
+ * performed, optimistic spinning cannot be done.
+ *
+ * Check this in every inner iteration because we may
+ * be racing against another thread's ww_mutex_lock.
+ */
+ if (READ_ONCE(ww->ctx)) {
+ ret = false;
+ break;
+ }
+ }
+
cpu_relax();
}
rcu_read_unlock();
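
[ Aside: a minimal user-space sketch of the race the re-check above
  closes. Standalone C11; every name in it (ww_mutex_sketch and
  friends) is an illustrative stand-in, not a kernel type. Checking
  ctx once before the loop would miss a ww_mutex_lock() that
  publishes its ctx mid-spin; checking it each iteration, as the
  hunk above does, bounds how long deadlock detection is delayed. ]

#include <stdatomic.h>
#include <stdbool.h>

struct ww_ctx_sketch { int acquired; };

struct ww_mutex_sketch {
	_Atomic(struct ww_ctx_sketch *) ctx;	/* published by a racing locker */
	_Atomic(void *) owner;			/* non-NULL while the lock is held */
};

/*
 * Spin while the lock is owned, but bail out the moment a ww context
 * is published, so the caller can fall back to the wait_lock slowpath
 * where wound/wait deadlock detection runs.
 */
static bool spin_on_owner_sketch(struct ww_mutex_sketch *lock,
				 struct ww_ctx_sketch *our_ctx)
{
	while (atomic_load_explicit(&lock->owner, memory_order_acquire)) {
		/* re-check every iteration: ctx can appear at any time */
		if (our_ctx->acquired > 0 &&
		    atomic_load_explicit(&lock->ctx, memory_order_relaxed))
			return false;	/* stop spinning, take the slowpath */
	}
	return true;			/* owner released the lock */
}
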
@@ -460,22 +483,6 @@ static bool mutex_optimistic_spin(struct mutex *lock,
for (;;) {
struct task_struct *owner;
 
- if (use_ww_ctx && ww_ctx->acquired > 0) {
- struct ww_mutex *ww;
-
- ww = container_of(lock, struct ww_mutex, base);
- /*
- * If ww->ctx is set the contents are undefined, only
- * by acquiring wait_lock there is a guarantee that
- * they are not invalid when reading.
- *
- * As such, when deadlock detection needs to be
- * performed the optimistic spinning cannot be done.
- */
- if (READ_ONCE(ww->ctx))
- goto fail_unlock;
- }
-
/*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
@@ -487,7 +494,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
break;
}
 
- if (!mutex_spin_on_owner(lock, owner))
+ if (!mutex_spin_on_owner(lock, owner, use_ww_ctx,
+ ww_ctx))
goto fail_unlock;
}
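
[ Aside: where the ww_ctx->acquired > 0 gate comes from, sketched
  against the in-tree ww_mutex API. 'acquired' counts locks already
  held under the context; only then can a wound/wait deadlock exist,
  so only then must the spinner watch for a competing ctx.
  demo_ww_class and lock_pair_sketch are made-up names; everything
  else is the regular API. ]

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

/* Take a then b under one acquire context, with the standard
 * -EDEADLK backoff: drop what we hold, take the contended lock in
 * the slowpath, and retry the other one. */
static void lock_pair_sketch(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &demo_ww_class);

	if (ww_mutex_lock(a, &ctx))
		return;	/* cannot happen: ctx.acquired is still 0 here */

	/*
	 * This second lock spins on b's owner with ctx.acquired > 0,
	 * so with this patch mutex_spin_on_owner() gives up the spin
	 * as soon as another context publishes b->ctx, and the
	 * wait_lock slowpath can detect the deadlock promptly.
	 */
	while ((err = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);	/* wait for the winner */
		swap(a, b);			/* b is now held; retry the other */
	}

	ww_acquire_done(&ctx);
	/* ... critical section ... */
	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}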