diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -105,7 +105,10 @@
* indicated by comparing RETURN with OLD.
*/

-#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \
+#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ r, p, o, n) \
({ \
__label__ no_zabha_zacas, end; \
\
@@ -119,9 +122,9 @@
: : : : no_zabha_zacas); \
\
__asm__ __volatile__ ( \
- prepend \
+ cas_prepend \
" amocas" cas_sfx " %0, %z2, %1\n" \
- append \
+ cas_append \
: "+&r" (r), "+A" (*(p)) \
: "rJ" (n) \
: "memory"); \
@@ -139,7 +142,7 @@ no_zabha_zacas:; \
ulong __rc; \
\
__asm__ __volatile__ ( \
- prepend \
+ sc_prepend \
"0: lr.w %0, %2\n" \
" and %1, %0, %z5\n" \
" bne %1, %z3, 1f\n" \
@@ -147,7 +150,7 @@ no_zabha_zacas:; \
" or %1, %1, %z4\n" \
" sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
- append \
+ sc_append \
"1:\n" \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
: "rJ" ((long)__oldx), "rJ" (__newx), \
@@ -159,7 +162,10 @@ no_zabha_zacas:; \
end:; \
})

-#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \
+#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ r, p, co, o, n) \
({ \
__label__ no_zacas, end; \
register unsigned int __rc; \
@@ -170,9 +176,9 @@ end:; \
: : : : no_zacas); \
\
__asm__ __volatile__ ( \
- prepend \
- " amocas" sc_cas_sfx " %0, %z2, %1\n" \
- append \
+ cas_prepend \
+ " amocas" cas_sfx " %0, %z2, %1\n" \
+ cas_append \
: "+&r" (r), "+A" (*(p)) \
: "rJ" (n) \
: "memory"); \
@@ -181,12 +187,12 @@ end:; \
\
no_zacas: \
__asm__ __volatile__ ( \
- prepend \
+ sc_prepend \
"0: lr" lr_sfx " %0, %2\n" \
" bne %0, %z3, 1f\n" \
- " sc" sc_cas_sfx " %1, %z4, %2\n" \
+ " sc" sc_sfx " %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- append \
+ sc_append \
"1:\n" \
: "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
: "rJ" (co o), "rJ" (n) \
@@ -195,7 +201,9 @@ no_zacas: \
end:; \
})

-#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \
+#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __old = (old); \
@@ -204,22 +212,28 @@ end:; \
\
switch (sizeof(*__ptr)) { \
case 1: \
- __arch_cmpxchg_masked(sc_sfx, ".b" sc_sfx, \
- prepend, append, \
- __ret, __ptr, __old, __new); \
+ __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, __old, __new); \
break; \
case 2: \
- __arch_cmpxchg_masked(sc_sfx, ".h" sc_sfx, \
- prepend, append, \
- __ret, __ptr, __old, __new); \
+ __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, __old, __new); \
break; \
case 4: \
- __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
- __ret, __ptr, (long), __old, __new); \
+ __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, (long), __old, __new); \
break; \
case 8: \
- __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \
- __ret, __ptr, /**/, __old, __new); \
+ __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, /**/, __old, __new); \
break; \
default: \
BUILD_BUG(); \
@@ -228,16 +242,19 @@ end:; \
})

#define arch_cmpxchg_relaxed(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), "", "", "")
+ _arch_cmpxchg((ptr), (o), (n), "", "", "", "", "", "")
#define arch_cmpxchg_acquire(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
+ _arch_cmpxchg((ptr), (o), (n), "", "", \
+ "", RISCV_ACQUIRE_BARRIER, "", RISCV_ACQUIRE_BARRIER)
#define arch_cmpxchg_release(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
+ _arch_cmpxchg((ptr), (o), (n), "", "", \
+ RISCV_RELEASE_BARRIER, "", RISCV_RELEASE_BARRIER, "")

#define arch_cmpxchg(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
+ _arch_cmpxchg((ptr), (o), (n), ".rl", ".aqrl", \
+ "", RISCV_FULL_BARRIER, "", "")
#define arch_cmpxchg_local(ptr, o, n) \
arch_cmpxchg_relaxed((ptr), (o), (n))
The current fully-ordered cmpxchgXX() implementation results in:

  amocas.X.rl	a5,a4,(s1)
  fence		rw,rw

This provides enough sync but we can actually use the following better
mapping instead:

  amocas.X.aqrl	a5,a4,(s1)

Suggested-by: Andrea Parri <andrea@rivosinc.com>
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
---
 arch/riscv/include/asm/cmpxchg.h | 71 ++++++++++++++++++++------------
 1 file changed, 44 insertions(+), 27 deletions(-)
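[Editor's note] For illustration, a minimal stand-alone sketch of the two
fully-ordered CAS sequences discussed above. These are not the kernel
macros themselves: the function names are hypothetical, and building the
sketch assumes a Zacas-capable toolchain and CPU (e.g. -march=rv64gc_zacas).

#include <stdint.h>

/* Pre-patch mapping: release-ordered CAS followed by a full fence. */
static inline uint32_t cas32_rl_fence(uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t r = old;	/* amocas takes the expected value in rd */

	__asm__ __volatile__ (
		"	amocas.w.rl	%0, %z2, %1\n"
		"	fence		rw, rw\n"
		: "+&r" (r), "+A" (*p)
		: "rJ" (new)
		: "memory");

	return r;		/* the CAS succeeded iff r == old */
}

/* Post-patch mapping: one acquire+release CAS, no trailing fence. */
static inline uint32_t cas32_aqrl(uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t r = old;

	__asm__ __volatile__ (
		"	amocas.w.aqrl	%0, %z2, %1\n"
		: "+&r" (r), "+A" (*p)
		: "rJ" (new)
		: "memory");

	return r;
}

Note that only the amocas path drops the trailing fence: as the diff
shows, the LR/SC fallback still passes RISCV_FULL_BARRIER as its
sc_append argument.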