
[v2,04/10] riscv: Improve amocas.X use in cmpxchg()

Message ID 20240626130347.520750-5-alexghiti@rivosinc.com
State Superseded
Series Zacas/Zabha support and qspinlocks

Checks

Context Check Description
conchuod/vmtest-fixes-PR fail PR summary
conchuod/patch-4-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-4-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-4-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-4-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-4-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-4-test-6 warning .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-4-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-4-test-8 success .github/scripts/patches/tests/header_inline.sh
conchuod/patch-4-test-9 success .github/scripts/patches/tests/kdoc.sh
conchuod/patch-4-test-10 success .github/scripts/patches/tests/module_param.sh
conchuod/patch-4-test-11 success .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-4-test-12 success .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

Alexandre Ghiti June 26, 2024, 1:03 p.m. UTC
cmpxchg() uses the amocas.X instructions from Zacas and Zabha, but it still
surrounds them with the barriers required by the LR/SC acquire/release
sequences.

Let's improve this by using the amocas acquire/release annotations (.aq/.rl)
instead, which removes the need for those barriers.
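
For example, with Zacas, a fully-ordered 32-bit cmpxchg() currently expands
to something like (operand registers are illustrative):

	amocas.w.rl	a0, a2, (a1)
	fence		rw, rw

whereas with the .aqrl annotation the trailing fence goes away:

	amocas.w.aqrl	a0, a2, (a1)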

Suggested-by: Andrea Parri <andrea@rivosinc.com>
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
---
 arch/riscv/include/asm/cmpxchg.h | 60 ++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 27 deletions(-)

Comments

Andrea Parri June 27, 2024, 1:31 p.m. UTC | #1
On Wed, Jun 26, 2024 at 03:03:41PM +0200, Alexandre Ghiti wrote:
> cmpxchg() uses the amocas.X instructions from Zacas and Zabha, but it still
> surrounds them with the barriers required by the LR/SC acquire/release
> sequences.
>
> Let's improve this by using the amocas acquire/release annotations (.aq/.rl)
> instead, which removes the need for those barriers.

I can't really parse this changelog...


> [...]
>
>  #define arch_cmpxchg_relaxed(ptr, o, n)					\
> -	_arch_cmpxchg((ptr), (o), (n), "", "", "")
> +	_arch_cmpxchg((ptr), (o), (n), "", "", "", "")
>  
>  #define arch_cmpxchg_acquire(ptr, o, n)					\
> -	_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
> +	_arch_cmpxchg((ptr), (o), (n), "", ".aq",			\
> +		      "", RISCV_ACQUIRE_BARRIER)
>  
>  #define arch_cmpxchg_release(ptr, o, n)					\
> -	_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
> +	_arch_cmpxchg((ptr), (o), (n), "", ".rl",			\
> +		      RISCV_RELEASE_BARRIER, "")
>  
>  #define arch_cmpxchg(ptr, o, n)						\
> -	_arch_cmpxchg((ptr), (o), (n), ".rl", "", "	fence rw, rw\n")
> +	_arch_cmpxchg((ptr), (o), (n), ".rl", ".aqrl",			\
> +		      "", RISCV_FULL_BARRIER)

... but this is not what I suggested: my suggestion [1] was about (limited
to) the fully-ordered macro arch_cmpxchg().  In fact, I've recently raised
some concern about similar changes to the acquire/release macros, cf. [2].

Any particular reasons for doing this?

  Andrea

[1] https://lore.kernel.org/lkml/ZlYff9x12FICHoP0@andrea/
[2] https://lore.kernel.org/lkml/20240505123340.38495-1-puranjay@kernel.org/
Alexandre Ghiti July 4, 2024, 4:40 p.m. UTC | #2
On 27/06/2024 15:31, Andrea Parri wrote:
> On Wed, Jun 26, 2024 at 03:03:41PM +0200, Alexandre Ghiti wrote:
>> cmpxchg() uses the amocas.X instructions from Zacas and Zabha, but it still
>> surrounds them with the barriers required by the LR/SC acquire/release
>> sequences.
>>
>> Let's improve this by using the amocas acquire/release annotations (.aq/.rl)
>> instead, which removes the need for those barriers.
> I can't really parse this changelog...
>
>> [...]
>>
>>   #define arch_cmpxchg_relaxed(ptr, o, n)					\
>> -	_arch_cmpxchg((ptr), (o), (n), "", "", "")
>> +	_arch_cmpxchg((ptr), (o), (n), "", "", "", "")
>>   
>>   #define arch_cmpxchg_acquire(ptr, o, n)					\
>> -	_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
>> +	_arch_cmpxchg((ptr), (o), (n), "", ".aq",			\
>> +		      "", RISCV_ACQUIRE_BARRIER)
>>   
>>   #define arch_cmpxchg_release(ptr, o, n)					\
>> -	_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
>> +	_arch_cmpxchg((ptr), (o), (n), "", ".rl",			\
>> +		      RISCV_RELEASE_BARRIER, "")
>>   
>>   #define arch_cmpxchg(ptr, o, n)						\
>> -	_arch_cmpxchg((ptr), (o), (n), ".rl", "", "	fence rw, rw\n")
>> +	_arch_cmpxchg((ptr), (o), (n), ".rl", ".aqrl",			\
>> +		      "", RISCV_FULL_BARRIER)
> ... but this is not what I suggested: my suggestion [1] was about (limited
> to) the fully-ordered macro arch_cmpxchg().  In fact, I've recently raised
> some concern about similar changes to the acquire/release macros, cf. [2].
>
> Any particular reasons for doing this?


Not at all; I overinterpreted your suggestion. I'll restrict this to the
fully-ordered macro then.
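
Something like this, I suppose: add separate cas_prepend/cas_append
arguments (names illustrative, not final) so that the acquire/release
variants keep their explicit barriers around the amocas path, and only
the fully-ordered macro uses the annotation, e.g.:

	/* Sketch only, not the final patch. */
	#define arch_cmpxchg_acquire(ptr, o, n)				\
		_arch_cmpxchg((ptr), (o), (n), "", "",			\
			      "", RISCV_ACQUIRE_BARRIER,		\
			      "", RISCV_ACQUIRE_BARRIER)

	#define arch_cmpxchg(ptr, o, n)					\
		_arch_cmpxchg((ptr), (o), (n), ".rl", ".aqrl",		\
			      "", RISCV_FULL_BARRIER,			\
			      "", "")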

Thanks,



Patch

diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index b9a3fdcec919..3c65b00a0d36 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -105,7 +105,9 @@ 
  * indicated by comparing RETURN with OLD.
  */
 
-#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n)	\
+#define __arch_cmpxchg_masked(sc_sfx, cas_sfx,				\
+			      sc_prepend, sc_append,			\
+			      r, p, o, n)				\
 ({									\
 	__label__ no_zacas, zabha, end;					\
 									\
@@ -129,7 +131,7 @@  no_zacas:;								\
 	ulong __rc;							\
 									\
 	__asm__ __volatile__ (						\
-		prepend							\
+		sc_prepend							\
 		"0:	lr.w %0, %2\n"					\
 		"	and  %1, %0, %z5\n"				\
 		"	bne  %1, %z3, 1f\n"				\
@@ -137,7 +139,7 @@  no_zacas:;								\
 		"	or   %1, %1, %z4\n"				\
 		"	sc.w" sc_sfx " %1, %1, %2\n"			\
 		"	bnez %1, 0b\n"					\
-		append							\
+		sc_append							\
 		"1:\n"							\
 		: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
 		: "rJ" ((long)__oldx), "rJ" (__newx),			\
@@ -150,9 +152,7 @@  no_zacas:;								\
 zabha:									\
 	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {			\
 		__asm__ __volatile__ (					\
-			prepend						\
 			"	amocas" cas_sfx " %0, %z2, %1\n"	\
-			append						\
 			: "+&r" (r), "+A" (*(p))			\
 			: "rJ" (n)					\
 			: "memory");					\
@@ -160,7 +160,9 @@  zabha:									\
 end:;									\
 })
 
-#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n)	\
+#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx,				\
+		       sc_prepend, sc_append,				\
+		       r, p, co, o, n)					\
 ({									\
 	__label__ zacas, end;						\
 	register unsigned int __rc;					\
@@ -172,12 +174,12 @@  end:;									\
 	}								\
 									\
 	__asm__ __volatile__ (						\
-		prepend							\
+		sc_prepend							\
 		"0:	lr" lr_sfx " %0, %2\n"				\
 		"	bne  %0, %z3, 1f\n"				\
-		"	sc" sc_cas_sfx " %1, %z4, %2\n"			\
+		"	sc" sc_sfx " %1, %z4, %2\n"			\
 		"	bnez %1, 0b\n"					\
-		append							\
+		sc_append							\
 		"1:\n"							\
 		: "=&r" (r), "=&r" (__rc), "+A" (*(p))			\
 		: "rJ" (co o), "rJ" (n)					\
@@ -187,9 +189,7 @@  end:;									\
 zacas:									\
 	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS)) {			\
 		__asm__ __volatile__ (					\
-			prepend						\
-			"	amocas" sc_cas_sfx " %0, %z2, %1\n"	\
-			append						\
+			"	amocas" cas_sfx " %0, %z2, %1\n"	\
 			: "+&r" (r), "+A" (*(p))			\
 			: "rJ" (n)					\
 			: "memory");					\
@@ -197,7 +197,8 @@  zacas:									\
 end:;									\
 })
 
-#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append)		\
+#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx,			\
+		      sc_prepend, sc_append)				\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
 	__typeof__(*(__ptr)) __old = (old);				\
@@ -206,22 +207,24 @@  end:;									\
 									\
 	switch (sizeof(*__ptr)) {					\
 	case 1:								\
-		__arch_cmpxchg_masked(sc_sfx, ".b" sc_sfx,		\
-					prepend, append,		\
-					__ret, __ptr, __old, __new);    \
+		__arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx,		\
+				      sc_prepend, sc_append,		\
+				      __ret, __ptr, __old, __new);	\
 		break;							\
 	case 2:								\
-		__arch_cmpxchg_masked(sc_sfx, ".h" sc_sfx,		\
-					prepend, append,		\
-					__ret, __ptr, __old, __new);	\
+		__arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx,		\
+				      sc_prepend, sc_append,		\
+				      __ret, __ptr, __old, __new);	\
 		break;							\
 	case 4:								\
-		__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append,	\
-				__ret, __ptr, (long), __old, __new);	\
+		__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx,		\
+			       sc_prepend, sc_append,			\
+			       __ret, __ptr, (long), __old, __new);	\
 		break;							\
 	case 8:								\
-		__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append,	\
-				__ret, __ptr, /**/, __old, __new);	\
+		__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx,		\
+			       sc_prepend, sc_append,			\
+			       __ret, __ptr, /**/, __old, __new);	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
@@ -230,16 +233,19 @@  end:;									\
 })
 
 #define arch_cmpxchg_relaxed(ptr, o, n)					\
-	_arch_cmpxchg((ptr), (o), (n), "", "", "")
+	_arch_cmpxchg((ptr), (o), (n), "", "", "", "")
 
 #define arch_cmpxchg_acquire(ptr, o, n)					\
-	_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
+	_arch_cmpxchg((ptr), (o), (n), "", ".aq",			\
+		      "", RISCV_ACQUIRE_BARRIER)
 
 #define arch_cmpxchg_release(ptr, o, n)					\
-	_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
+	_arch_cmpxchg((ptr), (o), (n), "", ".rl",			\
+		      RISCV_RELEASE_BARRIER, "")
 
 #define arch_cmpxchg(ptr, o, n)						\
-	_arch_cmpxchg((ptr), (o), (n), ".rl", "", "	fence rw, rw\n")
+	_arch_cmpxchg((ptr), (o), (n), ".rl", ".aqrl",			\
+		      "", RISCV_FULL_BARRIER)
 
 #define arch_cmpxchg_local(ptr, o, n)					\
 	arch_cmpxchg_relaxed((ptr), (o), (n))