===================================================================
@@ -126,39 +126,32 @@ void __sched mutex_unlock(struct mutex *
EXPORT_SYMBOL(mutex_unlock);
-/*
- * Lock a mutex (possibly interruptible), slowpath:
+/**
+ * mutex_spin - optimistic spinning on a mutex
+ * @lock: mutex to spin on
+ *
+ * This function implements optimistic spinning to acquire @lock when
+ * the lock owner is currently running on a (different) CPU.
+ *
+ * The rationale is that if the lock owner is running, it is likely to
+ * release the lock soon.
+ *
+ * Since this needs to know the lock owner, and this mutex implementation
+ * doesn't track the owner atomically in the lock field, we need to track
+ * it non-atomically.
+ *
+ * We can't do this for DEBUG_MUTEXES because that relies on wait_lock to
+ * serialize everything.
+ *
+ * CONTEXT:
+ * Preemption disabled.
+ *
+ * RETURNS:
+ * %true if @lock is acquired, %false otherwise.
*/
-static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
- unsigned long ip)
+static inline bool mutex_spin(struct mutex *lock)
{
- struct task_struct *task = current;
- struct mutex_waiter waiter;
- unsigned long flags;
-
- preempt_disable();
- mutex_acquire(&lock->dep_map, subclass, 0, ip);
-
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- /*
- * Optimistic spinning.
- *
- * We try to spin for acquisition when we find that there are no
- * pending waiters and the lock owner is currently running on a
- * (different) CPU.
- *
- * The rationale is that if the lock owner is running, it is likely to
- * release the lock soon.
- *
- * Since this needs the lock owner, and this mutex implementation
- * doesn't track the owner atomically in the lock field, we need to
- * track it non-atomically.
- *
- * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
- * to serialize everything.
- */
-
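+	/*
+	 * Spin until we take the lock, the owner stops running, or we
+	 * need to reschedule.
+	 */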
for (;;) {
struct thread_info *owner;
@@ -177,12 +170,8 @@ __mutex_lock_common(struct mutex *lock,
if (owner && !mutex_spin_on_owner(lock, owner))
break;
- if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
- lock_acquired(&lock->dep_map, ip);
- mutex_set_owner(lock);
- preempt_enable();
- return 0;
- }
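+		/* Try 1 -> 0; count 1 means unlocked, 0 means locked. */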
+ if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
+ return true;
/*
* When there's no owner, we might have preempted between the
@@ -190,7 +179,7 @@ __mutex_lock_common(struct mutex *lock,
* we're an RT task that will live-lock because we won't let
* the owner complete.
*/
- if (!owner && (need_resched() || rt_task(task)))
+ if (!owner && (need_resched() || rt_task(current)))
break;
/*
@@ -202,6 +191,30 @@ __mutex_lock_common(struct mutex *lock,
arch_mutex_cpu_relax();
}
#endif
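+	/* Spinning failed or isn't configured; caller takes the slowpath. */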
+ return false;
+}
+
+/*
+ * Lock a mutex (possibly interruptible), slowpath:
+ */
+static inline int __sched
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ unsigned long ip)
+{
+ struct task_struct *task = current;
+ struct mutex_waiter waiter;
+ unsigned long flags;
+
+ preempt_disable();
+ mutex_acquire(&lock->dep_map, subclass, 0, ip);
+
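+	/* Try optimistic spinning first; on success we hold the lock. */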
+ if (mutex_spin(lock)) {
+ lock_acquired(&lock->dep_map, ip);
+ mutex_set_owner(lock);
+ preempt_enable();
+ return 0;
+ }
+
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);