@@ -596,7 +596,7 @@ unsigned long domain_get_maximum_gpfn(struct domain *d);
/* Definition of an mm lock: spinlock with extra fields for debugging */
typedef struct mm_lock {
- spinlock_t lock;
+ rspinlock_t lock;
int unlock_level;
int locker; /* processor which holds the lock */
const char *locker_function; /* func that took it */
@@ -20,7 +20,7 @@ DECLARE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
static inline void mm_lock_init(mm_lock_t *l)
{
- spin_lock_init(&l->lock);
+ rspin_lock_init(&l->lock);
l->locker = -1;
l->locker_function = "nobody";
l->unlock_level = 0;
@@ -627,8 +627,8 @@ struct domain *domain_create(domid_t domid,
atomic_set(&d->refcnt, 1);
RCU_READ_LOCK_INIT(&d->rcu_lock);
- spin_lock_init_prof(d, domain_lock);
- spin_lock_init_prof(d, page_alloc_lock);
+ rspin_lock_init_prof(d, domain_lock);
+ rspin_lock_init_prof(d, page_alloc_lock);
spin_lock_init(&d->hypercall_deadlock_mutex);
INIT_PAGE_LIST_HEAD(&d->page_list);
INIT_PAGE_LIST_HEAD(&d->extra_page_list);
@@ -1331,7 +1331,7 @@ unsigned int ioreq_broadcast(ioreq_t *p, bool buffered)
void ioreq_domain_init(struct domain *d)
{
- spin_lock_init(&d->ioreq_server.lock);
+ rspin_lock_init(&d->ioreq_server.lock);
arch_ioreq_domain_init(d);
}
@@ -120,7 +120,7 @@ static int __read_mostly sercon_handle = -1;
int8_t __read_mostly opt_console_xen; /* console=xen */
#endif
-static DEFINE_SPINLOCK(console_lock);
+static DEFINE_RSPINLOCK(console_lock);
/*
* To control the amount of printing, thresholds are added.
@@ -1169,7 +1169,7 @@ void console_force_unlock(void)
{
watchdog_disable();
spin_debug_disable();
- spin_lock_init(&console_lock);
+ rspin_lock_init(&console_lock);
serial_force_unlock(sercon_handle);
console_locks_busted = 1;
console_start_sync();
@@ -50,7 +50,7 @@ struct pci_seg {
} bus2bridge[MAX_BUSES];
};
-static spinlock_t _pcidevs_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_RSPINLOCK(_pcidevs_lock);
void pcidevs_lock(void)
{
@@ -376,9 +376,9 @@ struct domain
rcu_read_lock_t rcu_lock;
- spinlock_t domain_lock;
+ rspinlock_t domain_lock;
- spinlock_t page_alloc_lock; /* protects all the following fields */
+ rspinlock_t page_alloc_lock; /* protects all the following fields */
struct page_list_head page_list; /* linked list */
struct page_list_head extra_page_list; /* linked list (size extra_pages) */
struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
@@ -597,7 +597,7 @@ struct domain
#ifdef CONFIG_IOREQ_SERVER
/* Lock protects all other values in the sub-struct */
struct {
- spinlock_t lock;
+ rspinlock_t lock;
struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
} ioreq_server;
#endif
@@ -45,7 +45,7 @@ union lock_debug { };
lock profiling on:
Global locks which should be subject to profiling must be declared via
- DEFINE_SPINLOCK.
+ DEFINE_[R]SPINLOCK.
For locks in structures further measures are necessary:
- the structure definition must include a profile_head with exactly this
@@ -56,7 +56,7 @@ union lock_debug { };
- the single locks which are subject to profiling have to be initialized
via
- spin_lock_init_prof(ptr, lock);
+ [r]spin_lock_init_prof(ptr, lock);
with ptr being the main structure pointer and lock the spinlock field
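(Editorial aside, not part of the patch: a minimal sketch of what the documentation above describes for a profiled recursive lock embedded in a structure. The names my_struct and my_struct_init are made up, and only the two steps quoted above — the profile_head field and the [r]spin_lock_init_prof() call — are shown; the remaining registration steps for structure locks are omitted, and lock profiling is assumed to be enabled.)

    struct my_struct {
        rspinlock_t lock;
        /* Required field for per-structure lock profiling. */
        struct lock_profile_qhead profile_head;
    };

    static void my_struct_init(struct my_struct *s)
    {
        /* s is the main structure pointer, lock the spinlock field. */
        rspin_lock_init_prof(s, lock);
    }
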
@@ -109,12 +109,16 @@ struct lock_profile_qhead {
spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL); \
static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l); \
_LOCK_PROFILE_PTR(l)
+#define DEFINE_RSPINLOCK(l) \
+ rspinlock_t l = _SPIN_LOCK_UNLOCKED(NULL); \
+ static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l); \
+ _LOCK_PROFILE_PTR(l)
-#define spin_lock_init_prof(s, l) \
+#define __spin_lock_init_prof(s, l, locktype) \
do { \
struct lock_profile *prof; \
prof = xzalloc(struct lock_profile); \
- (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof); \
+ (s)->l = (locktype)_SPIN_LOCK_UNLOCKED(prof); \
if ( !prof ) \
{ \
printk(XENLOG_WARNING \
@@ -128,6 +132,9 @@ struct lock_profile_qhead {
(s)->profile_head.elem_q = prof; \
} while( 0 )
+#define spin_lock_init_prof(s, l) __spin_lock_init_prof(s, l, spinlock_t)
+#define rspin_lock_init_prof(s, l) __spin_lock_init_prof(s, l, rspinlock_t)
+
void _lock_profile_register_struct(
int32_t type, struct lock_profile_qhead *qhead, int32_t idx);
void _lock_profile_deregister_struct(int32_t type,
@@ -151,8 +158,10 @@ struct lock_profile_qhead { };
.debug =_LOCK_DEBUG, \
}
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
+#define DEFINE_RSPINLOCK(l) rspinlock_t l = SPIN_LOCK_UNLOCKED
#define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
+#define rspin_lock_init_prof(s, l) rspin_lock_init(&((s)->l))
#define lock_profile_register_struct(type, ptr, idx)
#define lock_profile_deregister_struct(type, ptr)
#define spinlock_profile_printall(key)
@@ -182,8 +191,10 @@ typedef struct spinlock {
#endif
} spinlock_t;
+typedef spinlock_t rspinlock_t;
#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
+#define rspin_lock_init(l) (*(l) = (rspinlock_t)SPIN_LOCK_UNLOCKED)
void _spin_lock(spinlock_t *lock);
void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data);
Introduce a new type "rspinlock_t" to be used for recursive spinlocks. For now it is only an alias of spinlock_t, so both types can still be used for recursive spinlocks. This will be changed later, though. Switch all recursive spinlocks to the new type. Define the initializer helpers and use them where appropriate. Signed-off-by: Juergen Gross <jgross@suse.com> --- V2: - carved out from V1 patch --- xen/arch/x86/include/asm/mm.h | 2 +- xen/arch/x86/mm/mm-locks.h | 2 +- xen/common/domain.c | 4 ++-- xen/common/ioreq.c | 2 +- xen/drivers/char/console.c | 4 ++-- xen/drivers/passthrough/pci.c | 2 +- xen/include/xen/sched.h | 6 +++--- xen/include/xen/spinlock.h | 19 +++++++++++++++---- 8 files changed, 26 insertions(+), 15 deletions(-)