@@ -261,29 +261,31 @@ void spin_debug_disable(void)
#ifdef CONFIG_DEBUG_LOCK_PROFILE
+#define LOCK_PROFILE_PAR lock->profile
#define LOCK_PROFILE_REL \
- if ( lock->profile ) \
+ if ( profile ) \
{ \
- lock->profile->time_hold += NOW() - lock->profile->time_locked; \
- lock->profile->lock_cnt++; \
+ profile->time_hold += NOW() - profile->time_locked; \
+ profile->lock_cnt++; \
}
#define LOCK_PROFILE_VAR(var, val) s_time_t var = (val)
#define LOCK_PROFILE_BLOCK(var ) var = var ? : NOW()
#define LOCK_PROFILE_BLKACC(tst, val) \
if ( tst ) \
{ \
- lock->profile->time_block += lock->profile->time_locked - (val); \
- lock->profile->block_cnt++; \
+ profile->time_block += profile->time_locked - (val); \
+ profile->block_cnt++; \
}
#define LOCK_PROFILE_GOT(val) \
- if ( lock->profile ) \
+ if ( profile ) \
{ \
- lock->profile->time_locked = NOW(); \
+ profile->time_locked = NOW(); \
LOCK_PROFILE_BLKACC(val, val); \
}
#else
+#define LOCK_PROFILE_PAR NULL
#define LOCK_PROFILE_REL
#define LOCK_PROFILE_VAR(var, val)
#define LOCK_PROFILE_BLOCK(var)
@@ -307,17 +309,18 @@ static always_inline uint16_t observe_head(const spinlock_tickets_t *t)
return read_atomic(&t->head);
}
-static void always_inline spin_lock_common(spinlock_t *lock,
+static void always_inline spin_lock_common(spinlock_tickets_t *t,
+ union lock_debug *debug,
+ struct lock_profile *profile,
void (*cb)(void *), void *data)
{
spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
LOCK_PROFILE_VAR(block, 0);
- check_lock(&lock->debug, false);
+ check_lock(debug, false);
preempt_disable();
- tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail,
- tickets.head_tail);
- while ( tickets.tail != observe_head(&lock->tickets) )
+ tickets.head_tail = arch_fetch_and_add(&t->head_tail, tickets.head_tail);
+ while ( tickets.tail != observe_head(t) )
{
LOCK_PROFILE_BLOCK(block);
if ( cb )
@@ -325,18 +328,19 @@ static void always_inline spin_lock_common(spinlock_t *lock,
arch_lock_relax();
}
arch_lock_acquire_barrier();
- got_lock(&lock->debug);
+ got_lock(debug);
LOCK_PROFILE_GOT(block);
}
void _spin_lock(spinlock_t *lock)
{
- spin_lock_common(lock, NULL, NULL);
+ spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
+ NULL);
}
void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
{
- spin_lock_common(lock, cb, data);
+ spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, cb, data);
}
void _spin_lock_irq(spinlock_t *lock)
@@ -355,16 +359,23 @@ unsigned long _spin_lock_irqsave(spinlock_t *lock)
return flags;
}
-void _spin_unlock(spinlock_t *lock)
+static void always_inline spin_unlock_common(spinlock_tickets_t *t,
+ union lock_debug *debug,
+ struct lock_profile *profile)
{
LOCK_PROFILE_REL;
- rel_lock(&lock->debug);
+ rel_lock(debug);
arch_lock_release_barrier();
- add_sized(&lock->tickets.head, 1);
+ add_sized(&t->head, 1);
arch_lock_signal();
preempt_enable();
}
+void _spin_unlock(spinlock_t *lock)
+{
+ spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
void _spin_unlock_irq(spinlock_t *lock)
{
_spin_unlock(lock);
@@ -377,25 +388,25 @@ void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
local_irq_restore(flags);
}
+static int always_inline spin_is_locked_common(const spinlock_tickets_t *t)
+{
+ return t->head != t->tail;
+}
+
int _spin_is_locked(const spinlock_t *lock)
{
- /*
- * Recursive locks may be locked by another CPU, yet we return
- * "false" here, making this function suitable only for use in
- * ASSERT()s and alike.
- */
- return lock->recurse_cpu == SPINLOCK_NO_CPU
- ? lock->tickets.head != lock->tickets.tail
- : lock->recurse_cpu == smp_processor_id();
+ return spin_is_locked_common(&lock->tickets);
}
-int _spin_trylock(spinlock_t *lock)
+static int always_inline spin_trylock_common(spinlock_tickets_t *t,
+ union lock_debug *debug,
+ struct lock_profile *profile)
{
spinlock_tickets_t old, new;
preempt_disable();
- check_lock(&lock->debug, true);
- old = observe_lock(&lock->tickets);
+ check_lock(debug, true);
+ old = observe_lock(t);
if ( old.head != old.tail )
{
preempt_enable();
@@ -403,8 +414,7 @@ int _spin_trylock(spinlock_t *lock)
}
new = old;
new.tail++;
- if ( cmpxchg(&lock->tickets.head_tail,
- old.head_tail, new.head_tail) != old.head_tail )
+ if ( cmpxchg(&t->head_tail, old.head_tail, new.head_tail) != old.head_tail )
{
preempt_enable();
return 0;
@@ -413,29 +423,41 @@ int _spin_trylock(spinlock_t *lock)
* cmpxchg() is a full barrier so no need for an
* arch_lock_acquire_barrier().
*/
- got_lock(&lock->debug);
+ got_lock(debug);
LOCK_PROFILE_GOT(0);
return 1;
}
-void _spin_barrier(spinlock_t *lock)
+int _spin_trylock(spinlock_t *lock)
+{
+ return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
+static void always_inline spin_barrier_common(spinlock_tickets_t *t,
+ union lock_debug *debug,
+ struct lock_profile *profile)
{
spinlock_tickets_t sample;
LOCK_PROFILE_VAR(block, NOW());
- check_barrier(&lock->debug);
+ check_barrier(debug);
smp_mb();
- sample = observe_lock(&lock->tickets);
+ sample = observe_lock(t);
if ( sample.head != sample.tail )
{
- while ( observe_head(&lock->tickets) == sample.head )
+ while ( observe_head(t) == sample.head )
arch_lock_relax();
- LOCK_PROFILE_BLKACC(lock->profile, block);
+ LOCK_PROFILE_BLKACC(profile, block);
}
smp_mb();
}
+void _spin_barrier(spinlock_t *lock)
+{
+ spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
int rspin_trylock(rspinlock_t *lock)
{
unsigned int cpu = smp_processor_id();
@@ -448,7 +470,8 @@ int rspin_trylock(rspinlock_t *lock)
if ( likely(lock->recurse_cpu != cpu) )
{
- if ( !spin_trylock(lock) )
+ if ( !spin_trylock_common(&lock->tickets, &lock->debug,
+ LOCK_PROFILE_PAR) )
return 0;
lock->recurse_cpu = cpu;
}
@@ -466,7 +489,8 @@ void rspin_lock(rspinlock_t *lock)
if ( likely(lock->recurse_cpu != cpu) )
{
- _spin_lock(lock);
+ spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
+ NULL);
lock->recurse_cpu = cpu;
}
@@ -490,7 +514,7 @@ void rspin_unlock(rspinlock_t *lock)
if ( likely(--lock->recurse_cnt == 0) )
{
lock->recurse_cpu = SPINLOCK_NO_CPU;
- spin_unlock(lock);
+ spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}
}
@@ -163,6 +163,7 @@ extern void cf_check spinlock_profile_reset(unsigned char key);
#else
struct lock_profile_qhead { };
+struct lock_profile { };
#define SPIN_LOCK_UNLOCKED { \
    .recurse_cpu = SPINLOCK_NO_CPU, \
Add another function level in spinlock.c hiding the spinlock_t layout from the low-level locking code. This is done in preparation for introducing rspinlock_t for recursive locks without having to duplicate all of the locking code. Signed-off-by: Juergen Gross <jgross@suse.com> --- V2: - new patch --- xen/common/spinlock.c | 104 +++++++++++++++++++++++-------------- xen/include/xen/spinlock.h | 1 + 2 files changed, 65 insertions(+), 40 deletions(-)
Add another function level in spinlock.c hiding the spinlock_t layout from the low level locking code. This is done in preparation of introducing rspinlock_t for recursive locks without having to duplicate all of the locking code. Signed-off-by: Juergen Gross <jgross@suse.com> --- V2: - new patch --- xen/common/spinlock.c | 104 +++++++++++++++++++++++-------------- xen/include/xen/spinlock.h | 1 + 2 files changed, 65 insertions(+), 40 deletions(-)