@@ -1317,7 +1317,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
if ( page_get_owner(page) == d )
return;
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
/*
* The incremented type count pins as writable or read-only.
@@ -1348,7 +1348,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
page_list_add_tail(page, &d->xenpage_list);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
int xenmem_add_to_physmap_one(
@@ -212,7 +212,7 @@ void dump_pageframe_info(struct domain *d)
{
unsigned long total[MASK_EXTR(PGT_type_mask, PGT_type_mask) + 1] = {};
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
unsigned int index = MASK_EXTR(page->u.inuse.type_info,
@@ -231,13 +231,13 @@ void dump_pageframe_info(struct domain *d)
_p(mfn_x(page_to_mfn(page))),
page->count_info, page->u.inuse.type_info);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
if ( is_hvm_domain(d) )
p2m_pod_dump_data(d);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->xenpage_list )
{
@@ -253,7 +253,7 @@ void dump_pageframe_info(struct domain *d)
page->count_info, page->u.inuse.type_info);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
void update_guest_memory_policy(struct vcpu *v,
@@ -2446,10 +2446,10 @@ int domain_relinquish_resources(struct domain *d)
d->arch.auto_unmask = 0;
}
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_splice(&d->arch.relmem_list, &d->page_list);
INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
PROGRESS(xen):
@@ -482,7 +482,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), INVALID_M2P_ENTRY);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
/* The incremented type count pins as writable or read-only. */
page->u.inuse.type_info =
@@ -502,7 +502,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
page_list_add_tail(page, &d->xenpage_list);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
void make_cr3(struct vcpu *v, mfn_t mfn)
@@ -3584,11 +3584,11 @@ long do_mmuext_op(
{
bool drop_ref;
- spin_lock(&pg_owner->page_alloc_lock);
+ nrspin_lock(&pg_owner->page_alloc_lock);
drop_ref = (pg_owner->is_dying &&
test_and_clear_bit(_PGT_pinned,
&page->u.inuse.type_info));
- spin_unlock(&pg_owner->page_alloc_lock);
+ nrspin_unlock(&pg_owner->page_alloc_lock);
if ( drop_ref )
{
pin_drop:
@@ -4411,7 +4411,7 @@ int steal_page(
* that it might be upon return from alloc_domheap_pages with
* MEMF_no_owner set.
*/
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
BUG_ON(page->u.inuse.type_info & (PGT_count_mask | PGT_locked |
PGT_pinned));
@@ -4423,7 +4423,7 @@ int steal_page(
if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
drop_dom_ref = true;
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( unlikely(drop_dom_ref) )
put_domain(d);
@@ -746,11 +746,11 @@ static int page_make_private(struct domain *d, struct page_info *page)
if ( !get_page(page, dom_cow) )
return -EINVAL;
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
if ( d->is_dying )
{
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
put_page(page);
return -EBUSY;
}
@@ -758,7 +758,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
if ( page->u.inuse.type_info != expected_type )
{
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
put_page(page);
return -EEXIST;
}
@@ -775,7 +775,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
if ( domain_adjust_tot_pages(d, 1) == 1 )
get_knownalive_domain(d);
page_list_add_tail(page, &d->page_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
put_page(page);
@@ -27,7 +27,7 @@
static inline void lock_page_alloc(struct p2m_domain *p2m)
{
page_alloc_mm_pre_lock(p2m->domain);
- spin_lock(&(p2m->domain->page_alloc_lock));
+ nrspin_lock(&(p2m->domain->page_alloc_lock));
page_alloc_mm_post_lock(p2m->domain,
p2m->domain->arch.page_alloc_unlock_level);
}
@@ -35,7 +35,7 @@ static inline void lock_page_alloc(struct p2m_domain *p2m)
static inline void unlock_page_alloc(struct p2m_domain *p2m)
{
page_alloc_mm_unlock(p2m->domain->arch.page_alloc_unlock_level);
- spin_unlock(&(p2m->domain->page_alloc_lock));
+ nrspin_unlock(&(p2m->domain->page_alloc_lock));
}
/*
@@ -2225,7 +2225,7 @@ void audit_p2m(struct domain *d,
/* Audit part two: walk the domain's page allocation list, checking
* the m2p entries. */
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
mfn = mfn_x(page_to_mfn(page));
@@ -2277,7 +2277,7 @@ void audit_p2m(struct domain *d,
P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n",
mfn, gfn, mfn_x(p2mfn));
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
pod_unlock(p2m);
p2m_unlock(p2m);
@@ -205,14 +205,14 @@ static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
continue;
printk("MACing Domain %u\n", d->domain_id);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each(page, &d->page_list)
{
void *pg = __map_domain_page(page);
vmac_update(pg, PAGE_SIZE, &ctx);
unmap_domain_page(pg);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( is_iommu_enabled(d) && is_vtd )
{
@@ -621,14 +621,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
uint64_t new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT - 10);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
/*
* NB. We removed a check that new_max >= current tot_pages; this means
* that the domain will now be allowed to "ratchet" down to new_max. In
* the meantime, while tot > max, all new allocations are disallowed.
*/
d->max_pages = min(new_max, (uint64_t)(typeof(d->max_pages))-1);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
break;
}
@@ -2376,7 +2376,7 @@ gnttab_transfer(
mfn = page_to_mfn(page);
}
- spin_lock(&e->page_alloc_lock);
+ nrspin_lock(&e->page_alloc_lock);
/*
* Check that 'e' will accept the page and has reservation
@@ -2387,7 +2387,7 @@ gnttab_transfer(
unlikely(domain_tot_pages(e) >= e->max_pages) ||
unlikely(!(e->tot_pages + 1)) )
{
- spin_unlock(&e->page_alloc_lock);
+ nrspin_unlock(&e->page_alloc_lock);
if ( e->is_dying )
gdprintk(XENLOG_INFO, "Transferee d%d is dying\n",
@@ -2411,7 +2411,7 @@ gnttab_transfer(
* safely drop the lock and re-aquire it later to add page to the
* pagelist.
*/
- spin_unlock(&e->page_alloc_lock);
+ nrspin_unlock(&e->page_alloc_lock);
okay = gnttab_prepare_for_transfer(e, d, gop.ref);
/*
@@ -2427,9 +2427,9 @@ gnttab_transfer(
* Need to grab this again to safely free our "reserved"
* page in the page total
*/
- spin_lock(&e->page_alloc_lock);
+ nrspin_lock(&e->page_alloc_lock);
drop_dom_ref = !domain_adjust_tot_pages(e, -1);
- spin_unlock(&e->page_alloc_lock);
+ nrspin_unlock(&e->page_alloc_lock);
if ( okay /* i.e. e->is_dying due to the surrounding if() */ )
gdprintk(XENLOG_INFO, "Transferee d%d is now dying\n",
@@ -770,10 +770,10 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
(1UL << in_chunk_order)) -
(j * (1UL << exch.out.extent_order)));
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
drop_dom_ref = (dec_count &&
!domain_adjust_tot_pages(d, -dec_count));
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( drop_dom_ref )
put_domain(d);
@@ -718,13 +718,13 @@ static void cf_check dump_numa(unsigned char key)
memset(page_num_node, 0, sizeof(page_num_node));
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
i = page_to_nid(page);
page_num_node[i]++;
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
for_each_online_node ( i )
printk(" Node %u: %u\n", i, page_num_node[i]);
@@ -515,7 +515,7 @@ int domain_set_outstanding_pages(struct domain *d, unsigned long pages)
* must always take the global heap_lock rather than only in the much
* rarer case that d->outstanding_pages is non-zero
*/
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
spin_lock(&heap_lock);
/* pages==0 means "unset" the claim. */
@@ -561,7 +561,7 @@ int domain_set_outstanding_pages(struct domain *d, unsigned long pages)
out:
spin_unlock(&heap_lock);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
return ret;
}
@@ -2343,7 +2343,7 @@ int assign_pages(
int rc = 0;
unsigned int i;
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
if ( unlikely(d->is_dying) )
{
@@ -2425,7 +2425,7 @@ int assign_pages(
}
out:
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
return rc;
}
@@ -2906,9 +2906,9 @@ mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
ASSERT_ALLOC_CONTEXT();
/* Acquire a page from reserved page list(resv_page_list). */
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page = page_list_remove_head(&d->resv_page_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( unlikely(!page) )
return INVALID_MFN;
@@ -2927,9 +2927,9 @@ mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
*/
unprepare_staticmem_pages(page, 1, false);
fail:
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_add_tail(page, &d->resv_page_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
return INVALID_MFN;
}
#endif
@@ -369,9 +369,9 @@ long read_console_ring(struct xen_sysctl_readconsole *op)
if ( op->clear )
{
- spin_lock_irq(&console_lock);
+ nrspin_lock_irq(&console_lock);
conringc = p - c > conring_size ? p - conring_size : c;
- spin_unlock_irq(&console_lock);
+ nrspin_unlock_irq(&console_lock);
}
op->count = sofar;
@@ -639,7 +639,7 @@ static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer,
if ( is_hardware_domain(cd) )
{
/* Use direct console output as it could be interactive */
- spin_lock_irq(&console_lock);
+ nrspin_lock_irq(&console_lock);
console_serial_puts(kbuf, kcount);
video_puts(kbuf, kcount);
@@ -660,7 +660,7 @@ static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer,
tasklet_schedule(&notify_dom0_con_ring_tasklet);
}
- spin_unlock_irq(&console_lock);
+ nrspin_unlock_irq(&console_lock);
}
else
{
@@ -1027,9 +1027,9 @@ void __init console_init_preirq(void)
pv_console_set_rx_handler(serial_rx);
/* HELLO WORLD --- start-of-day banner text. */
- spin_lock(&console_lock);
+ nrspin_lock(&console_lock);
__putstr(xen_banner());
- spin_unlock(&console_lock);
+ nrspin_unlock(&console_lock);
printk("Xen version %d.%d%s (%s@%s) (%s) %s %s\n",
xen_major_version(), xen_minor_version(), xen_extra_version(),
xen_compile_by(), xen_compile_domain(), xen_compiler(),
@@ -1066,13 +1066,13 @@ void __init console_init_ring(void)
}
opt_conring_size = PAGE_SIZE << order;
- spin_lock_irqsave(&console_lock, flags);
+ nrspin_lock_irqsave(&console_lock, flags);
for ( i = conringc ; i != conringp; i++ )
ring[i & (opt_conring_size - 1)] = conring[i & (conring_size - 1)];
conring = ring;
smp_wmb(); /* Allow users of console_force_unlock() to see larger buffer. */
conring_size = opt_conring_size;
- spin_unlock_irqrestore(&console_lock, flags);
+ nrspin_unlock_irqrestore(&console_lock, flags);
printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
}
@@ -101,6 +101,8 @@ struct lock_profile_qhead {
};
#define _LOCK_PROFILE(lockname) { .name = #lockname, .lock = &lockname, }
+#define _RLOCK_PROFILE(lockname) { .name = #lockname, .rlock = &lockname, \
+ .is_rlock = 1, }
#define _LOCK_PROFILE_PTR(name) \
static struct lock_profile * const __lock_profile_##name \
__used_section(".lockprofile.data") = \
@@ -117,10 +119,10 @@ struct lock_profile_qhead {
_LOCK_PROFILE_PTR(l)
#define DEFINE_RSPINLOCK(l) \
rspinlock_t l = _SPIN_LOCK_UNLOCKED(NULL); \
- static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l); \
+ static struct lock_profile __lock_profile_data_##l = _RLOCK_PROFILE(l); \
_LOCK_PROFILE_PTR(l)
-#define __spin_lock_init_prof(s, l, locktype) \
+#define __spin_lock_init_prof(s, l, lockptr, locktype, isr) \
do { \
struct lock_profile *prof; \
prof = xzalloc(struct lock_profile); \
@@ -133,13 +135,16 @@ struct lock_profile_qhead {
break; \
} \
prof->name = #l; \
- prof->lock = &(s)->l; \
+ prof->lockptr = &(s)->l; \
+ prof->is_rlock = isr; \
prof->next = (s)->profile_head.elem_q; \
(s)->profile_head.elem_q = prof; \
} while( 0 )
-#define spin_lock_init_prof(s, l) __spin_lock_init_prof(s, l, spinlock_t)
-#define rspin_lock_init_prof(s, l) __spin_lock_init_prof(s, l, rspinlock_t)
+#define spin_lock_init_prof(s, l) \
+ __spin_lock_init_prof(s, l, lock, spinlock_t, 0)
+#define rspin_lock_init_prof(s, l) \
+ __spin_lock_init_prof(s, l, rlock, rspinlock_t, 1)
void _lock_profile_register_struct(
int32_t type, struct lock_profile_qhead *qhead, int32_t idx);
@@ -174,6 +179,7 @@ struct lock_profile_qhead { };
#endif
+
typedef union {
uint32_t head_tail;
struct {
@@ -261,4 +267,12 @@ void rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l) _spin_barrier(l)
+#define nrspin_trylock(l) spin_trylock(l)
+#define nrspin_lock(l) spin_lock(l)
+#define nrspin_unlock(l) spin_unlock(l)
+#define nrspin_lock_irq(l) spin_lock_irq(l)
+#define nrspin_unlock_irq(l) spin_unlock_irq(l)
+#define nrspin_lock_irqsave(l, f) spin_lock_irqsave(l, f)
+#define nrspin_unlock_irqrestore(l, f) spin_unlock_irqrestore(l, f)
+
#endif /* __SPINLOCK_H__ */
In order to prepare a type-safe recursive spinlock structure, add
explicitly non-recursive locking functions to be used for non-recursive
locking of spinlocks, which are used recursively, too.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- rename functions (Jan Beulich)
- get rid of !! in pcidevs_locked() (Jan Beulich)
---
 xen/arch/arm/mm.c             |  4 ++--
 xen/arch/x86/domain.c         | 12 ++++++------
 xen/arch/x86/mm.c             | 12 ++++++------
 xen/arch/x86/mm/mem_sharing.c |  8 ++++----
 xen/arch/x86/mm/p2m-pod.c     |  4 ++--
 xen/arch/x86/mm/p2m.c         |  4 ++--
 xen/arch/x86/tboot.c          |  4 ++--
 xen/common/domctl.c           |  4 ++--
 xen/common/grant_table.c      | 10 +++++-----
 xen/common/memory.c           |  4 ++--
 xen/common/numa.c             |  4 ++--
 xen/common/page_alloc.c       | 16 ++++++++--------
 xen/drivers/char/console.c    | 16 ++++++++--------
 xen/include/xen/spinlock.h    | 24 +++++++++++++++++-----
 14 files changed, 70 insertions(+), 56 deletions(-)
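
For reference, a minimal usage sketch (not part of this patch) of how call
sites are expected to look after the conversion: paths that never nest on a
lock use the new nrspin_* wrappers, while paths that may re-enter keep the
rspin_* variants. The example assumes d->page_alloc_lock has already been
switched to the recursive lock type earlier in the series; the two helper
functions below are hypothetical and exist only for illustration.

    /* Hypothetical caller that never takes the lock twice on one path. */
    static void example_remove_page(struct domain *d, struct page_info *pg)
    {
        nrspin_lock(&d->page_alloc_lock);
        page_list_del(pg, &d->page_list);
        nrspin_unlock(&d->page_alloc_lock);
    }

    /* Hypothetical caller that may be reached with the lock already held. */
    static void example_relinquish(struct domain *d)
    {
        rspin_lock(&d->page_alloc_lock);
        /* ... operate on d->page_list ... */
        rspin_unlock(&d->page_alloc_lock);
    }

With rspinlock_t still being an alias of spinlock_t at this point, the
nrspin_* macros simply forward to the plain spin_* functions; the split only
becomes type-enforced once the dedicated recursive lock structure is
introduced later in the series.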