@@ -385,7 +385,7 @@ int p2m_pod_empty_cache(struct domain *d)
/* After this barrier no new PoD activities can happen. */
BUG_ON(!d->is_dying);
- spin_barrier(&p2m->pod.lock.lock);
+ rspin_barrier(&p2m->pod.lock.lock);
lock_page_alloc(p2m);
@@ -974,7 +974,7 @@ int domain_kill(struct domain *d)
case DOMDYING_alive:
domain_pause(d);
d->is_dying = DOMDYING_dying;
- spin_barrier(&d->domain_lock);
+ rspin_barrier(&d->domain_lock);
argo_destroy(d);
vnuma_destroy(d->vnuma);
domain_set_outstanding_pages(d, 0);
@@ -476,7 +476,7 @@ unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
{
long dom_before, dom_after, dom_claimed, sys_before, sys_after;
- ASSERT(spin_is_locked(&d->page_alloc_lock));
+ ASSERT(rspin_is_locked(&d->page_alloc_lock));
d->tot_pages += pages;
/*
@@ -458,6 +458,23 @@ void _spin_barrier(spinlock_t *lock)
spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}
+int rspin_is_locked(const rspinlock_t *lock)
+{
+ /*
+ * Recursive locks may be locked by another CPU, yet we return
+ * "false" here, making this function suitable only for use in
+ * ASSERT()s and alike.
+ */
+ return lock->recurse_cpu == SPINLOCK_NO_CPU
+ ? spin_is_locked_common(&lock->tickets)
+ : lock->recurse_cpu == smp_processor_id();
+}
+
+void rspin_barrier(rspinlock_t *lock)
+{
+ spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
int rspin_trylock(rspinlock_t *lock)
{
unsigned int cpu = smp_processor_id();
@@ -328,7 +328,7 @@ static void cf_check do_dec_thresh(unsigned char key, struct cpu_user_regs *regs
static void conring_puts(const char *str, size_t len)
{
- ASSERT(spin_is_locked(&console_lock));
+ ASSERT(rspin_is_locked(&console_lock));
while ( len-- )
conring[CONRING_IDX_MASK(conringp++)] = *str++;
@@ -766,7 +766,7 @@ static void __putstr(const char *str)
{
size_t len = strlen(str);
- ASSERT(spin_is_locked(&console_lock));
+ ASSERT(rspin_is_locked(&console_lock));
console_serial_puts(str, len);
video_puts(str, len);
@@ -64,7 +64,7 @@ void pcidevs_unlock(void)
bool pcidevs_locked(void)
{
- return !!spin_is_locked(&_pcidevs_lock);
+ return rspin_is_locked(&_pcidevs_lock);
}
static struct radix_tree_root pci_segments;
@@ -239,6 +239,8 @@ void rspin_lock(rspinlock_t *lock);
unsigned long __rspin_lock_irqsave(rspinlock_t *lock);
void rspin_unlock(rspinlock_t *lock);
void rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+int rspin_is_locked(const rspinlock_t *lock);
+void rspin_barrier(rspinlock_t *lock);
#define spin_lock(l) _spin_lock(l)
#define spin_lock_cb(l, c, d) _spin_lock_cb(l, c, d)
Add rspin_is_locked() and rspin_barrier() in order to prepare differing
spinlock_t and rspinlock_t types.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- partially carved out from V1 patch, partially new
---
 xen/arch/x86/mm/p2m-pod.c     |  2 +-
 xen/common/domain.c           |  2 +-
 xen/common/page_alloc.c       |  2 +-
 xen/common/spinlock.c         | 17 +++++++++++++++++
 xen/drivers/char/console.c    |  4 ++--
 xen/drivers/passthrough/pci.c |  2 +-
 xen/include/xen/spinlock.h    |  2 ++
 7 files changed, 25 insertions(+), 6 deletions(-)
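
As a reviewer aid (not part of the patch), a minimal sketch of how the two new
helpers are intended to be used; struct example, example_teardown() and
example_check() are made-up names, mirroring the domain_kill() /
p2m_pod_empty_cache() pattern touched above:

/* Illustration only, not part of this patch; all names here are hypothetical. */
struct example {
    rspinlock_t lock;
    bool dying;
};

static void example_teardown(struct example *ex)
{
    /* Block new lock users first, then drain any current holder. */
    ex->dying = true;
    rspin_barrier(&ex->lock);
    /* From here on no other CPU can still be inside the lock. */
}

static void example_check(struct example *ex)
{
    /*
     * Suitable for ASSERT()s only: a recursive lock held by another
     * CPU is reported as not locked, as noted in rspin_is_locked().
     */
    ASSERT(rspin_is_locked(&ex->lock));
}

The point of rspin_barrier() is the same as spin_barrier(): after setting a
"dying" flag that stops new lockers, it waits until the lock is observed free
once, so existing holders have drained.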