[v6,3/8] xen/spinlock: add missing rspin_is_locked() and rspin_barrier()

Message ID: 20240327152229.25847-4-jgross@suse.com
State: New, archived
Series: xen/spinlock: make recursive spinlocks a dedicated type

Commit Message

Jürgen Groß March 27, 2024, 3:22 p.m. UTC
Add rspin_is_locked() and rspin_barrier() in preparation for making
spinlock_t and rspinlock_t distinct types.
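
As an illustration (not part of the patch itself; the lock names here are
made up), the split in semantics the new helpers provide looks roughly
like this:

    spinlock_t lock;     /* non-recursive: ticket state only */
    rspinlock_t rlock;   /* recursive: ticket state plus recurse_cpu */

    ASSERT(spin_is_locked(&lock));    /* plain ticket test */
    ASSERT(rspin_is_locked(&rlock));  /* returns false when another CPU
                                         holds rlock recursively, so it
                                         is suitable for ASSERT()s only */
    rspin_barrier(&rlock);            /* wait until no CPU holds rlock */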

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- partially carved out from V1 patch, partially new
V5:
- let rspin_is_locked() return bool (Jan Beulich)
V6:
- Re-add comment to _spin_is_locked() (Jan Beulich)
---
 xen/arch/x86/mm/p2m-pod.c     |  2 +-
 xen/common/domain.c           |  2 +-
 xen/common/page_alloc.c       |  2 +-
 xen/common/spinlock.c         | 26 ++++++++++++++++++++------
 xen/drivers/char/console.c    |  4 ++--
 xen/drivers/passthrough/pci.c |  2 +-
 xen/include/xen/spinlock.h    |  4 ++++
 7 files changed, 30 insertions(+), 12 deletions(-)

Comments

Jan Beulich April 2, 2024, 2:37 p.m. UTC | #1
On 27.03.2024 16:22, Juergen Gross wrote:
> Add rspin_is_locked() and rspin_barrier() in preparation for making
> spinlock_t and rspinlock_t distinct types.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
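
For context, both barrier variants and the is-locked checks reduce to
operations on the ticket state. A sketch of the common helper, from my
reading of xen/common/spinlock.c (its body is not visible in this diff):

    /* Sketch; see xen/common/spinlock.c for the real definition. */
    static bool always_inline spin_is_locked_common(const spinlock_tickets_t *t)
    {
        /* Held iff a ticket has been handed out but not yet retired. */
        return t->head != t->tail;
    }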

Patch

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index c48ea169b7..9750a3a21b 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -374,7 +374,7 @@  int p2m_pod_empty_cache(struct domain *d)
 
     /* After this barrier no new PoD activities can happen. */
     BUG_ON(!d->is_dying);
-    spin_barrier(&p2m->pod.lock.lock);
+    rspin_barrier(&p2m->pod.lock.lock);
 
     lock_page_alloc(p2m);
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index ceb44c8266..282c3ab623 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -991,7 +991,7 @@  int domain_kill(struct domain *d)
     case DOMDYING_alive:
         domain_pause(d);
         d->is_dying = DOMDYING_dying;
-        spin_barrier(&d->domain_lock);
+        rspin_barrier(&d->domain_lock);
         argo_destroy(d);
         vnuma_destroy(d->vnuma);
         domain_set_outstanding_pages(d, 0);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 4d6ce726e3..7c1bdfc046 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -477,7 +477,7 @@  unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
 {
     long dom_before, dom_after, dom_claimed, sys_before, sys_after;
 
-    ASSERT(spin_is_locked(&d->page_alloc_lock));
+    ASSERT(rspin_is_locked(&d->page_alloc_lock));
     d->tot_pages += pages;
 
     /*
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 648393d95f..6572c76114 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -396,13 +396,10 @@  static bool always_inline spin_is_locked_common(const spinlock_tickets_t *t)
 int _spin_is_locked(const spinlock_t *lock)
 {
     /*
-     * Recursive locks may be locked by another CPU, yet we return
-     * "false" here, making this function suitable only for use in
-     * ASSERT()s and alike.
+     * This function is suitable only for use in ASSERT()s and alike, as it
+     * doesn't tell _who_ is holding the lock.
      */
-    return lock->recurse_cpu == SPINLOCK_NO_CPU
-           ? spin_is_locked_common(&lock->tickets)
-           : lock->recurse_cpu == smp_processor_id();
+    return spin_is_locked_common(&lock->tickets);
 }
 
 static bool always_inline spin_trylock_common(spinlock_tickets_t *t,
@@ -465,6 +462,23 @@  void _spin_barrier(spinlock_t *lock)
     spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
 }
 
+bool _rspin_is_locked(const rspinlock_t *lock)
+{
+    /*
+     * Recursive locks may be locked by another CPU, yet we return
+     * "false" here, making this function suitable only for use in
+     * ASSERT()s and alike.
+     */
+    return lock->recurse_cpu == SPINLOCK_NO_CPU
+           ? spin_is_locked_common(&lock->tickets)
+           : lock->recurse_cpu == smp_processor_id();
+}
+
+void _rspin_barrier(rspinlock_t *lock)
+{
+    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
 bool _rspin_trylock(rspinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 22f50fc617..d5e6aacc27 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -327,7 +327,7 @@  static void cf_check do_dec_thresh(unsigned char key, bool unused)
 
 static void conring_puts(const char *str, size_t len)
 {
-    ASSERT(spin_is_locked(&console_lock));
+    ASSERT(rspin_is_locked(&console_lock));
 
     while ( len-- )
         conring[CONRING_IDX_MASK(conringp++)] = *str++;
@@ -765,7 +765,7 @@  static void __putstr(const char *str)
 {
     size_t len = strlen(str);
 
-    ASSERT(spin_is_locked(&console_lock));
+    ASSERT(rspin_is_locked(&console_lock));
 
     console_serial_puts(str, len);
     video_puts(str, len);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 4fcc7e2cde..5a446d3dce 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -65,7 +65,7 @@  void pcidevs_unlock(void)
 
 bool pcidevs_locked(void)
 {
-    return !!spin_is_locked(&_pcidevs_lock);
+    return rspin_is_locked(&_pcidevs_lock);
 }
 
 static struct radix_tree_root pci_segments;
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 8bc4652526..148be1e116 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -297,6 +297,8 @@  void _rspin_lock(rspinlock_t *lock);
 unsigned long _rspin_lock_irqsave(rspinlock_t *lock);
 void _rspin_unlock(rspinlock_t *lock);
 void _rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+bool _rspin_is_locked(const rspinlock_t *lock);
+void _rspin_barrier(rspinlock_t *lock);
 
 static always_inline void rspin_lock(rspinlock_t *lock)
 {
@@ -307,6 +309,8 @@  static always_inline void rspin_lock(rspinlock_t *lock)
 #define rspin_trylock(l)              lock_evaluate_nospec(_rspin_trylock(l))
 #define rspin_unlock(l)               _rspin_unlock(l)
 #define rspin_unlock_irqrestore(l, f) _rspin_unlock_irqrestore(l, f)
+#define rspin_barrier(l)              _rspin_barrier(l)
+#define rspin_is_locked(l)            _rspin_is_locked(l)
 
 #define nrspin_trylock(l)    spin_trylock(l)
 #define nrspin_lock(l)       spin_lock(l)
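
As a closing usage sketch (hypothetical caller; my_lock and the two
functions around it are made up for illustration), the new wrappers slot
in wherever a recursive lock's state needs asserting or draining,
mirroring the console.c and domain.c changes above:

    static rspinlock_t my_lock;

    static void my_emit(const char *str)
    {
        ASSERT(rspin_is_locked(&my_lock));  /* caller must hold my_lock */
        /* ... emit str ... */
    }

    static void my_teardown(void)
    {
        /* Wait until every current holder, recursive or not, is gone. */
        rspin_barrier(&my_lock);
    }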