@@ -378,6 +378,28 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
struct mutex_waiter *waiter)
{
bool ret = true;
+ struct ww_acquire_ctx *owner_ww_ctx = NULL;
+
+ if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
+ struct ww_mutex *ww;
+ unsigned long flags;
+
+ ww = container_of(lock, struct ww_mutex, base);
+
+ /*
+ * Check the stamp of the current owner once. If the owner has a higher
+ * stamp than us we may spin optimistically; if its stamp is lower, we
+ * are the context expected to back off, so don't spin at all.
+ */
+ spin_lock_mutex(&lock->wait_lock, flags);
+ owner_ww_ctx = ww->ctx;
+ if (owner_ww_ctx &&
+ __ww_mutex_stamp_after(ww_ctx, owner_ww_ctx)) {
+ spin_unlock_mutex(&lock->wait_lock, flags);
+ return false;
+ }
+ spin_unlock_mutex(&lock->wait_lock, flags);
+ }
rcu_read_lock();
while (__mutex_owner(lock) == owner) {
@@ -414,9 +436,21 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
* Check this in every inner iteration because we may
* be racing against another thread's ww_mutex_lock.
*/
- if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) {
- ret = false;
- break;
+ if (ww_ctx->acquired > 0) {
+ struct ww_acquire_ctx *current_ctx;
+
+ current_ctx = READ_ONCE(ww->ctx);
+
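+ /*
+ * The owner context sampled above is tolerated: if it was set,
+ * its stamp is already known to be after ours. Any other
+ * context has not been stamp-checked, so stop spinning.
+ */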
+ if (current_ctx &&
+ current_ctx != owner_ww_ctx) {
+ ret = false;
+ break;
+ }
}
/*