
[v4,11/12] xen/spinlock: remove indirection through macros for spin_*() functions

Message ID: 20231212094725.22184-12-jgross@suse.com
State: New, archived
Series: xen/spinlock: make recursive spinlocks a dedicated type

Commit Message

Jürgen Groß Dec. 12, 2023, 9:47 a.m. UTC
In reality, all spin_*() functions are macros that merely forward to a
related real function.

Remove this macro layer, as it adds complexity without any gain.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
 xen/common/spinlock.c      | 28 +++++++++---------
 xen/include/xen/spinlock.h | 58 +++++++++++++++-----------------------
 2 files changed, 36 insertions(+), 50 deletions(-)
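
The indirection being removed follows a single pattern; a minimal sketch
distilled from the diff below (declarations only, not runnable code):

    /* Before: the public name is a macro forwarding to a '_'-prefixed function. */
    void _spin_lock(spinlock_t *lock);
    #define spin_lock(l) _spin_lock(l)

    /* After: the real function carries the public name directly. */
    void spin_lock(spinlock_t *lock);

spin_lock_irqsave() and spin_trylock_irqsave() necessarily stay macros, as
they assign to the flags argument in the caller's scope; only the helper
behind spin_lock_irqsave() is renamed from _spin_lock_irqsave() to
__spin_lock_irqsave().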

Comments

Jan Beulich Feb. 29, 2024, 3:35 p.m. UTC | #1
On 12.12.2023 10:47, Juergen Gross wrote:
> In reality, all spin_*() functions are macros that merely forward to a
> related real function.
> 
> Remove this macro layer, as it adds complexity without any gain.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Acked-by: Jan Beulich <jbeulich@suse.com>
with the same remark as on patch 04.

Jan

Patch

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index d0f8393504..296bcf33e6 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -332,30 +332,30 @@  static void always_inline spin_lock_common(spinlock_tickets_t *t,
     LOCK_PROFILE_GOT(block);
 }
 
-void _spin_lock(spinlock_t *lock)
+void spin_lock(spinlock_t *lock)
 {
     spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
                      NULL);
 }
 
-void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data)
+void spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data)
 {
     spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, cb, data);
 }
 
-void _spin_lock_irq(spinlock_t *lock)
+void spin_lock_irq(spinlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
-    _spin_lock(lock);
+    spin_lock(lock);
 }
 
-unsigned long _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __spin_lock_irqsave(spinlock_t *lock)
 {
     unsigned long flags;
 
     local_irq_save(flags);
-    _spin_lock(lock);
+    spin_lock(lock);
     return flags;
 }
 
@@ -371,20 +371,20 @@  static void always_inline spin_unlock_common(spinlock_tickets_t *t,
     preempt_enable();
 }
 
-void _spin_unlock(spinlock_t *lock)
+void spin_unlock(spinlock_t *lock)
 {
     spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
 }
 
-void _spin_unlock_irq(spinlock_t *lock)
+void spin_unlock_irq(spinlock_t *lock)
 {
-    _spin_unlock(lock);
+    spin_unlock(lock);
     local_irq_enable();
 }
 
-void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
-    _spin_unlock(lock);
+    spin_unlock(lock);
     local_irq_restore(flags);
 }
 
@@ -393,7 +393,7 @@  static int always_inline spin_is_locked_common(const spinlock_tickets_t *t)
     return t->head != t->tail;
 }
 
-int _spin_is_locked(const spinlock_t *lock)
+int spin_is_locked(const spinlock_t *lock)
 {
     return spin_is_locked_common(&lock->tickets);
 }
@@ -429,7 +429,7 @@  static int always_inline spin_trylock_common(spinlock_tickets_t *t,
     return 1;
 }
 
-int _spin_trylock(spinlock_t *lock)
+int spin_trylock(spinlock_t *lock)
 {
     return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
 }
@@ -453,7 +453,7 @@  static void always_inline spin_barrier_common(spinlock_tickets_t *t,
     smp_mb();
 }
 
-void _spin_barrier(spinlock_t *lock)
+void spin_barrier(spinlock_t *lock)
 {
     spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
 }
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index ca18b9250a..87946965b2 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -224,18 +224,30 @@  typedef struct rspinlock {
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 #define rspin_lock_init(l) (*(l) = (rspinlock_t)RSPIN_LOCK_UNLOCKED)
 
-void _spin_lock(spinlock_t *lock);
-void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data);
-void _spin_lock_irq(spinlock_t *lock);
-unsigned long _spin_lock_irqsave(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data);
+void spin_lock_irq(spinlock_t *lock);
+#define spin_lock_irqsave(l, f)                                 \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = __spin_lock_irqsave(l));                         \
+    })
+unsigned long __spin_lock_irqsave(spinlock_t *lock);
 
-void _spin_unlock(spinlock_t *lock);
-void _spin_unlock_irq(spinlock_t *lock);
-void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
+void spin_unlock(spinlock_t *lock);
+void spin_unlock_irq(spinlock_t *lock);
+void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
 
-int _spin_is_locked(const spinlock_t *lock);
-int _spin_trylock(spinlock_t *lock);
-void _spin_barrier(spinlock_t *lock);
+int spin_is_locked(const spinlock_t *lock);
+int spin_trylock(spinlock_t *lock);
+#define spin_trylock_irqsave(lock, flags)       \
+({                                              \
+    local_irq_save(flags);                      \
+    spin_trylock(lock) ?                        \
+    1 : ({ local_irq_restore(flags); 0; });     \
+})
+/* Ensure a lock is quiescent between two critical operations. */
+void spin_barrier(spinlock_t *lock);
 
 /*
  * rspin_[un]lock(): Use these forms when the lock can (safely!) be
@@ -270,32 +282,6 @@  void nrspin_unlock_irq(rspinlock_t *lock);
 unsigned long __nrspin_lock_irqsave(rspinlock_t *lock);
 void nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
 
-#define spin_lock(l)                  _spin_lock(l)
-#define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
-#define spin_lock_irq(l)              _spin_lock_irq(l)
-#define spin_lock_irqsave(l, f)                                 \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _spin_lock_irqsave(l));                          \
-    })
-
-#define spin_unlock(l)                _spin_unlock(l)
-#define spin_unlock_irq(l)            _spin_unlock_irq(l)
-#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)
-
-#define spin_is_locked(l)             _spin_is_locked(l)
-#define spin_trylock(l)               _spin_trylock(l)
-
-#define spin_trylock_irqsave(lock, flags)       \
-({                                              \
-    local_irq_save(flags);                      \
-    spin_trylock(lock) ?                        \
-    1 : ({ local_irq_restore(flags); 0; });     \
-})
-
 #define spin_lock_kick(l)             arch_lock_signal_wmb()
 
-/* Ensure a lock is quiescent between two critical operations. */
-#define spin_barrier(l)               _spin_barrier(l)
-
 #endif /* __SPINLOCK_H__ */
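
Callers are unaffected by the rename, since the public names are unchanged.
A minimal usage sketch, assuming a lock statically initialized with the
SPIN_LOCK_UNLOCKED initializer from this header (the lock variable and
function below are hypothetical):

    #include <xen/spinlock.h>

    /* Hypothetical example lock; SPIN_LOCK_UNLOCKED is the header's initializer. */
    static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

    static void example(void)
    {
        unsigned long flags;

        /* Expands to the BUILD_BUG_ON() type check plus __spin_lock_irqsave(). */
        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);
    }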