
x86/asm: Remove semicolon from LOCK prefix

Message ID: 20250228221213.2033895-1-andrew.cooper3@citrix.com

Commit Message

Andrew Cooper Feb. 28, 2025, 10:12 p.m. UTC
Most of Xen's LOCK prefixes are already written without a semicolon, but a few
still remain in the tree.

As noted in the Linux patch, this adversely affects size/inlining decisions,
and prevents the assembler from diagnosing some classes of error.
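As a minimal sketch of the diagnostics point (a hypothetical fragment, not
taken from this patch): with the semicolon, gas parses "lock" as a standalone
statement and cannot check that what follows is actually lockable, so a bad
pairing assembles silently and only faults at runtime; without the semicolon,
gas rejects it at build time.

    int x = 0, y;

    /* Assembles silently: gas emits the 0xF0 prefix byte as its own
     * statement and never checks the pairing.  The encoded result #UDs
     * at runtime, as MOV between registers is not lockable. */
    asm volatile ( "lock; movl %1, %0" : "=r" (y) : "r" (x) );

    /* Fails to assemble ("expecting lockable instruction after `lock'"
     * in current gas), catching the mistake at build time. */
    asm volatile ( "lock movl %1, %0" : "=r" (y) : "r" (x) );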

No functional change.

Link: https://lore.kernel.org/lkml/20250228085149.2478245-1-ubizjak@gmail.com/
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>

It really does change inlining decisions.  Bloat-o-meter reports:

  add/remove: 1/1 grow/shrink: 7/1 up/down: 691/-247 (444)
  Function                                     old     new   delta
  cpupool_do_sysctl                           1737    1945    +208
  kexec_do_unload.isra                           -     150    +150
  kexec_load_slot                              396     502    +106
  poll_timer_fn                                 37     104     +67
  cpupool_unassign_cpu_finish                  376     427     +51
  cpupool_create_pool                           82     130     +48
  cpupool_assign_cpu_locked                    346     394     +48
  panic                                        170     183     +13
  do_kexec_op_internal                        2117    2022     -95
  kexec_swap_images                            152       -    -152

e.g. poll_timer_fn() previously tail-called vcpu_unblock(), but now gets an
inlined copy of it instead.
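The likely mechanism (per the linked Linux patch, and the GCC manual's note
on the size of an asm) is that GCC estimates an asm statement's size by
counting newlines and statement separators, so the semicolon made each of
these single instructions look like two to the inliner's cost model.  A
minimal sketch, using a hypothetical lock_inc() helper for illustration:

    static inline void lock_inc(int *v)
    {
        /* Both statements emit identical bytes, but GCC's size
         * heuristic counts the ';' as a statement separator, costing
         * the first form as two instructions and the second as one. */
        asm volatile ( "lock; incl %0" : "+m" (*v) );
        asm volatile ( "lock incl %0" : "+m" (*v) );
    }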
---
 xen/arch/x86/include/asm/atomic.h   | 16 ++++++++--------
 xen/arch/x86/include/asm/bitops.h   | 12 ++++++------
 xen/arch/x86/include/asm/spinlock.h |  2 +-
 3 files changed, 15 insertions(+), 15 deletions(-)


base-commit: e95dc03717b8ae00de2a0b41b88905af6170b210

Patch

diff --git a/xen/arch/x86/include/asm/atomic.h b/xen/arch/x86/include/asm/atomic.h
index 16bd0ebfd763..ed4e09a50329 100644
--- a/xen/arch/x86/include/asm/atomic.h
+++ b/xen/arch/x86/include/asm/atomic.h
@@ -115,7 +115,7 @@  static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 static inline void atomic_add(int i, atomic_t *v)
 {
     asm volatile (
-        "lock; addl %1,%0"
+        "lock addl %1,%0"
         : "=m" (*(volatile int *)&v->counter)
         : "ir" (i), "m" (*(volatile int *)&v->counter) );
 }
@@ -128,7 +128,7 @@  static inline int atomic_add_return(int i, atomic_t *v)
 static inline void atomic_sub(int i, atomic_t *v)
 {
     asm volatile (
-        "lock; subl %1,%0"
+        "lock subl %1,%0"
         : "=m" (*(volatile int *)&v->counter)
         : "ir" (i), "m" (*(volatile int *)&v->counter) );
 }
@@ -142,7 +142,7 @@  static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; subl %[i], %[counter]\n\t"
+    asm volatile ( "lock subl %[i], %[counter]\n\t"
                    ASM_FLAG_OUT(, "setz %[zf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
@@ -154,7 +154,7 @@  static inline int atomic_sub_and_test(int i, atomic_t *v)
 static inline void atomic_inc(atomic_t *v)
 {
     asm volatile (
-        "lock; incl %0"
+        "lock incl %0"
         : "=m" (*(volatile int *)&v->counter)
         : "m" (*(volatile int *)&v->counter) );
 }
@@ -168,7 +168,7 @@  static inline int atomic_inc_and_test(atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; incl %[counter]\n\t"
+    asm volatile ( "lock incl %[counter]\n\t"
                    ASM_FLAG_OUT(, "setz %[zf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
@@ -180,7 +180,7 @@  static inline int atomic_inc_and_test(atomic_t *v)
 static inline void atomic_dec(atomic_t *v)
 {
     asm volatile (
-        "lock; decl %0"
+        "lock decl %0"
         : "=m" (*(volatile int *)&v->counter)
         : "m" (*(volatile int *)&v->counter) );
 }
@@ -194,7 +194,7 @@  static inline int atomic_dec_and_test(atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; decl %[counter]\n\t"
+    asm volatile ( "lock decl %[counter]\n\t"
                    ASM_FLAG_OUT(, "setz %[zf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
@@ -207,7 +207,7 @@  static inline int atomic_add_negative(int i, atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; addl %[i], %[counter]\n\t"
+    asm volatile ( "lock addl %[i], %[counter]\n\t"
                    ASM_FLAG_OUT(, "sets %[sf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [sf] ASM_FLAG_OUT("=@ccs", "=qm") (c)
diff --git a/xen/arch/x86/include/asm/bitops.h b/xen/arch/x86/include/asm/bitops.h
index 39e37f1cbe55..bb9d75646023 100644
--- a/xen/arch/x86/include/asm/bitops.h
+++ b/xen/arch/x86/include/asm/bitops.h
@@ -32,7 +32,7 @@ 
  */
 static inline void set_bit(int nr, volatile void *addr)
 {
-    asm volatile ( "lock; btsl %1,%0"
+    asm volatile ( "lock btsl %1,%0"
                    : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define set_bit(nr, addr) ({                            \
@@ -73,7 +73,7 @@  static inline void constant_set_bit(int nr, void *addr)
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-    asm volatile ( "lock; btrl %1,%0"
+    asm volatile ( "lock btrl %1,%0"
                    : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define clear_bit(nr, addr) ({                          \
@@ -140,7 +140,7 @@  static inline void constant_change_bit(int nr, void *addr)
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-    asm volatile ( "lock; btcl %1,%0"
+    asm volatile ( "lock btcl %1,%0"
                     : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define change_bit(nr, addr) ({                         \
@@ -160,7 +160,7 @@  static inline int test_and_set_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
-    asm volatile ( "lock; btsl %[nr], %[addr]\n\t"
+    asm volatile ( "lock btsl %[nr], %[addr]\n\t"
                    ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
                    : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
                      [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
@@ -206,7 +206,7 @@  static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
-    asm volatile ( "lock; btrl %[nr], %[addr]\n\t"
+    asm volatile ( "lock btrl %[nr], %[addr]\n\t"
                    ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
                    : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
                      [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
@@ -266,7 +266,7 @@  static inline int test_and_change_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
-    asm volatile ( "lock; btcl %[nr], %[addr]\n\t"
+    asm volatile ( "lock btcl %[nr], %[addr]\n\t"
                    ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
                    : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
                      [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
diff --git a/xen/arch/x86/include/asm/spinlock.h b/xen/arch/x86/include/asm/spinlock.h
index 56f60957522a..834e8c580ebd 100644
--- a/xen/arch/x86/include/asm/spinlock.h
+++ b/xen/arch/x86/include/asm/spinlock.h
@@ -3,7 +3,7 @@ 
 
 #define _raw_read_unlock(l) \
     BUILD_BUG_ON(sizeof((l)->lock) != 4); /* Clang doesn't support %z in asm. */ \
-    asm volatile ( "lock; decl %0" : "+m" ((l)->lock) :: "memory" )
+    asm volatile ( "lock decl %0" : "+m" ((l)->lock) :: "memory" )
 
 /*
  * On x86 the only reordering is of reads with older writes.  In the