[v4,09/12] xen/spinlock: add missing rspin_is_locked() and rspin_barrier()

Message ID 20231212094725.22184-10-jgross@suse.com
State New, archived
Series xen/spinlock: make recursive spinlocks a dedicated type

Commit Message

Jürgen Groß Dec. 12, 2023, 9:47 a.m. UTC
Add rspin_is_locked() and rspin_barrier() in order to prepare for
differing spinlock_t and rspinlock_t types.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- partially carved out from V1 patch, partially new
---
 xen/arch/x86/mm/p2m-pod.c     |  2 +-
 xen/common/domain.c           |  2 +-
 xen/common/page_alloc.c       |  2 +-
 xen/common/spinlock.c         | 17 +++++++++++++++++
 xen/drivers/char/console.c    |  4 ++--
 xen/drivers/passthrough/pci.c |  2 +-
 xen/include/xen/spinlock.h    |  2 ++
 7 files changed, 25 insertions(+), 6 deletions(-)
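
For context, a minimal sketch of the intended use of the two new helpers,
mirroring the call-site conversions in the patch below (the struct domain
pointer d and the surrounding comments are illustrative here, not part of
the patch):

    /* Caller is expected to hold the (recursive) page_alloc lock: */
    ASSERT(rspin_is_locked(&d->page_alloc_lock));

    /*
     * Tear-down: mark the domain as dying first, then wait until any
     * current holder of the recursive domain_lock has dropped it.
     */
    d->is_dying = DOMDYING_dying;
    rspin_barrier(&d->domain_lock);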

Comments

Jan Beulich Feb. 29, 2024, 2:14 p.m. UTC | #1
On 12.12.2023 10:47, Juergen Gross wrote:
> --- a/xen/common/spinlock.c
> +++ b/xen/common/spinlock.c
> @@ -458,6 +458,23 @@ void _spin_barrier(spinlock_t *lock)
>      spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
>  }
>  
> +int rspin_is_locked(const rspinlock_t *lock)
> +{
> +    /*
> +     * Recursive locks may be locked by another CPU, yet we return
> +     * "false" here, making this function suitable only for use in
> +     * ASSERT()s and the like.
> +     */
> +    return lock->recurse_cpu == SPINLOCK_NO_CPU
> +           ? spin_is_locked_common(&lock->tickets)
> +           : lock->recurse_cpu == smp_processor_id();
> +}
> +
> +void rspin_barrier(rspinlock_t *lock)
> +{
> +    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
> +}

Ah, here we go. Looks all okay to me, but needs re-ordering such that the
earlier patch won't transiently introduce a regression.

Jan
Jürgen Groß Feb. 29, 2024, 2:18 p.m. UTC | #2
On 29.02.24 15:14, Jan Beulich wrote:
> On 12.12.2023 10:47, Juergen Gross wrote:
>> --- a/xen/common/spinlock.c
>> +++ b/xen/common/spinlock.c
>> @@ -458,6 +458,23 @@ void _spin_barrier(spinlock_t *lock)
>>       spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
>>   }
>>   
>> +int rspin_is_locked(const rspinlock_t *lock)
>> +{
>> +    /*
>> +     * Recursive locks may be locked by another CPU, yet we return
>> +     * "false" here, making this function suitable only for use in
>> +     * ASSERT()s and the like.
>> +     */
>> +    return lock->recurse_cpu == SPINLOCK_NO_CPU
>> +           ? spin_is_locked_common(&lock->tickets)
>> +           : lock->recurse_cpu == smp_processor_id();
>> +}
>> +
>> +void rspin_barrier(rspinlock_t *lock)
>> +{
>> +    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
>> +}
> 
> Ah, here we go. Looks all okay to me, but needs re-ordering such that the
> earlier patch won't transiently introduce a regression.

Yes, I was just about to reply with something similar to your remark on patch 8.


Juergen
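
To make the caveat in the quoted comment concrete, a short illustration
(not part of the patch; "lock" is a hypothetical rspinlock_t and the CPU
numbers are made up):

    /* CPU 1 */
    rspin_lock(&lock);               /* lock.recurse_cpu == 1 from here on */

    /* CPU 2, concurrently */
    rspin_is_locked(&lock);          /* returns false: recurse_cpu (1) is
                                        neither SPINLOCK_NO_CPU nor equal
                                        to smp_processor_id() (2), even
                                        though the lock is held */

    /* CPU 1 */
    ASSERT(rspin_is_locked(&lock));  /* the intended kind of use: the
                                        holder asserting that it holds
                                        the lock */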

Patch

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 61a91f5a94..40d3b25d25 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -385,7 +385,7 @@  int p2m_pod_empty_cache(struct domain *d)
 
     /* After this barrier no new PoD activities can happen. */
     BUG_ON(!d->is_dying);
-    spin_barrier(&p2m->pod.lock.lock);
+    rspin_barrier(&p2m->pod.lock.lock);
 
     lock_page_alloc(p2m);
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index dc97755391..198cb36878 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -982,7 +982,7 @@  int domain_kill(struct domain *d)
     case DOMDYING_alive:
         domain_pause(d);
         d->is_dying = DOMDYING_dying;
-        spin_barrier(&d->domain_lock);
+        rspin_barrier(&d->domain_lock);
         argo_destroy(d);
         vnuma_destroy(d->vnuma);
         domain_set_outstanding_pages(d, 0);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a25c00a7d4..14010b6fa5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -476,7 +476,7 @@  unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
 {
     long dom_before, dom_after, dom_claimed, sys_before, sys_after;
 
-    ASSERT(spin_is_locked(&d->page_alloc_lock));
+    ASSERT(rspin_is_locked(&d->page_alloc_lock));
     d->tot_pages += pages;
 
     /*
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 31d12b1006..91e325f3fe 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -458,6 +458,23 @@  void _spin_barrier(spinlock_t *lock)
     spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
 }
 
+int rspin_is_locked(const rspinlock_t *lock)
+{
+    /*
+     * Recursive locks may be locked by another CPU, yet we return
+     * "false" here, making this function suitable only for use in
+     * ASSERT()s and the like.
+     */
+    return lock->recurse_cpu == SPINLOCK_NO_CPU
+           ? spin_is_locked_common(&lock->tickets)
+           : lock->recurse_cpu == smp_processor_id();
+}
+
+void rspin_barrier(rspinlock_t *lock)
+{
+    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
 int rspin_trylock(rspinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 8d05c57f69..e6502641eb 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -328,7 +328,7 @@  static void cf_check do_dec_thresh(unsigned char key, struct cpu_user_regs *regs
 
 static void conring_puts(const char *str, size_t len)
 {
-    ASSERT(spin_is_locked(&console_lock));
+    ASSERT(rspin_is_locked(&console_lock));
 
     while ( len-- )
         conring[CONRING_IDX_MASK(conringp++)] = *str++;
@@ -766,7 +766,7 @@  static void __putstr(const char *str)
 {
     size_t len = strlen(str);
 
-    ASSERT(spin_is_locked(&console_lock));
+    ASSERT(rspin_is_locked(&console_lock));
 
     console_serial_puts(str, len);
     video_puts(str, len);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 41444f8e2e..94f52b7acc 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -64,7 +64,7 @@  void pcidevs_unlock(void)
 
 bool pcidevs_locked(void)
 {
-    return !!spin_is_locked(&_pcidevs_lock);
+    return rspin_is_locked(&_pcidevs_lock);
 }
 
 static struct radix_tree_root pci_segments;
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index d6f4b66613..e63db4eb4c 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -239,6 +239,8 @@  void rspin_lock(rspinlock_t *lock);
 unsigned long __rspin_lock_irqsave(rspinlock_t *lock);
 void rspin_unlock(rspinlock_t *lock);
 void rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+int rspin_is_locked(const rspinlock_t *lock);
+void rspin_barrier(rspinlock_t *lock);
 
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)