@@ -108,4 +108,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
return prev;
}

+#define __MUTEX_XCHG_FAST_PATH
+
#endif
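For context, the xchg-based fast path that the new __MUTEX_XCHG_FAST_PATH
marker identifies works roughly like the asm-generic/mutex-xchg.h helpers
sketched below (simplified for illustration; the trylock and lock_retval
variants and any barrier details are omitted). The property that matters
here is that the lock side writes 0 unconditionally, so a pre-existing
contended count of -1 is transiently wiped out until the slow path
reinstates it:

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/*
	 * Unconditional exchange: if the count was -1 (contended),
	 * that state is overwritten with 0 here and only recreated
	 * later by the slow path (fail_fn).
	 */
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/*
	 * Only a nonzero previous count (-1, i.e. possible waiters)
	 * routes the unlock through the wakeup slow path; 0 is read
	 * as "locked, no waiters".
	 */
	if (unlikely(atomic_xchg(count, 1) != 0))
		fail_fn(count);
}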
@@ -161,6 +161,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,

for (;;) {
struct task_struct *owner;
+ int locked_val;

/*
* If there's an owner, wait for it to either
@@ -170,7 +171,19 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
if (owner && !mutex_spin_on_owner(lock, owner))
break;

- if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+#ifdef __MUTEX_XCHG_FAST_PATH
+ /*
+ * The fast path based on xchg sets a transient 0 count,
+ * relying on the slow path to restore a possible
+ * pre-existing contended count. Without checking the
+ * waiters' list we must presume possible contention here.
+ */
+ locked_val = -1;
+#else
+ locked_val = 0;
+#endif
+
+ if (atomic_cmpxchg(&lock->count, 1, locked_val) == 1) {
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);
preempt_enable();
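To make the "possible contention" above concrete, here is the kind of
interleaving (illustrative task names A/B/C, assuming the xchg fast path
sketched earlier) that loses a wakeup when the spinner installs 0
instead of -1:

  B: blocked in the slow path as a waiter           count == -1
  A: fastpath xchg(count, 0) returns -1             count ==  0
     (fast path fails; the contended state is
      erased, A enters the slow path and spins)
  C: unlock fastpath xchg(count, 1) returns 0       count ==  1
     (0 reads as "no waiters": wakeup skipped)
  A: spinner cmpxchg(count, 1, 0) returns 1         count ==  0
     (A acquires; B is no longer recorded anywhere)
  A: unlock fastpath xchg(count, 1) returns 0       count ==  1
     (no wakeup again: B sleeps forever on an
      unlocked mutex)

With locked_val = -1 the spinner acquires in the contended state, so its
eventual unlock is forced through the slow path and the queued waiter is
woken. The cost is a slow-path unlock for every spin-path acquisition
even when no waiter is actually queued, which is why the conservative
value is used only when __MUTEX_XCHG_FAST_PATH is defined.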