--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -428,6 +428,19 @@ extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_sync_cmpxchg arch_sync_cmpxchg
+#ifdef arch_sync_try_cmpxchg
+#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
+#else
+#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
/**
* raw_atomic_read() - atomic load with relaxed ordering
* @v: pointer to atomic_t
@@ -4649,4 +4662,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 2fdd6702823fa842f9cea57a002e6e4476ae780c
+// eec048affea735b8464f58e6d96992101f8f85f1
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -4998,6 +4998,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
+#define sync_try_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 1568f875fef72097413caab8339120c065a39aa4
+// 2cc4bc990fef44d3836ec108f11b610f3f438184
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -223,14 +223,15 @@ gen_xchg_fallbacks()
gen_try_cmpxchg_fallback()
{
+ local prefix="$1"; shift
local cmpxchg="$1"; shift;
- local order="$1"; shift;
+ local suffix="$1"; shift;
cat <<EOF
-#define raw_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
+#define raw_${prefix}try_${cmpxchg}${suffix}(_ptr, _oldp, _new) \\
({ \\
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
- ___r = raw_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
+ ___r = raw_${prefix}${cmpxchg}${suffix}((_ptr), ___o, (_new)); \\
if (unlikely(___r != ___o)) \\
*___op = ___r; \\
likely(___r == ___o); \\
@@ -259,11 +260,11 @@ gen_try_cmpxchg_order_fallback()
fi
printf "#else\n"
- gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
+ gen_try_cmpxchg_fallback "" "${cmpxchg}" "${order}"
printf "#endif\n\n"
}
-gen_try_cmpxchg_fallbacks()
+gen_try_cmpxchg_order_fallbacks()
{
local cmpxchg="$1"; shift;
@@ -272,15 +273,17 @@ gen_try_cmpxchg_fallbacks()
done
}
-gen_cmpxchg_local_fallbacks()
+gen_def_and_try_cmpxchg_fallback()
{
+ local prefix="$1"; shift
local cmpxchg="$1"; shift
+ local suffix="$1"; shift
- printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
- printf "#ifdef arch_try_${cmpxchg}\n"
- printf "#define raw_try_${cmpxchg} arch_try_${cmpxchg}\n"
+ printf "#define raw_${prefix}${cmpxchg}${suffix} arch_${prefix}${cmpxchg}${suffix}\n\n"
+ printf "#ifdef arch_${prefix}try_${cmpxchg}${suffix}\n"
+ printf "#define raw_${prefix}try_${cmpxchg}${suffix} arch_${prefix}try_${cmpxchg}${suffix}\n"
printf "#else\n"
- gen_try_cmpxchg_fallback "${cmpxchg}" ""
+ gen_try_cmpxchg_fallback "${prefix}" "${cmpxchg}" "${suffix}"
printf "#endif\n\n"
}
@@ -302,15 +305,15 @@ for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128"; do
done
for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do
- gen_try_cmpxchg_fallbacks "${cmpxchg}"
+ gen_try_cmpxchg_order_fallbacks "${cmpxchg}"
done
-for cmpxchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local"; do
- gen_cmpxchg_local_fallbacks "${cmpxchg}" ""
+for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do
+ gen_def_and_try_cmpxchg_fallback "" "${cmpxchg}" "_local"
done
-for cmpxchg in "sync_cmpxchg"; do
- printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
+for cmpxchg in "cmpxchg"; do
+ gen_def_and_try_cmpxchg_fallback "sync_" "${cmpxchg}" ""
done
grep '^[a-z]' "$1" | while read name meta args; do
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -169,7 +169,8 @@ for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg
done
done
-for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
+for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" \
+ "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local" "sync_try_cmpxchg"; do
gen_xchg "${xchg}" ""
printf "\n"
done
Provide the generic sync_try_cmpxchg() function from the raw_ prefixed
version, also adding explicit instrumentation.

The patch amends the existing scripts to generate the sync_try_cmpxchg()
locking primitive and its raw_sync_try_cmpxchg() fallback, while leaving
the existing macros from the try_cmpxchg() family unchanged.

The target can define its own arch_sync_try_cmpxchg() to override the
generic version of raw_sync_try_cmpxchg(). This allows the target to
generate more optimal assembly than the generic version.

Additionally, the patch renames two generator functions in
gen-atomic-fallback.sh to better reflect what they really do.

Cc: Will Deacon <will@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
v2: Improve commit description.
---
 include/linux/atomic/atomic-arch-fallback.h | 15 +++++++++-
 include/linux/atomic/atomic-instrumented.h  | 10 ++++++-
 scripts/atomic/gen-atomic-fallback.sh       | 33 +++++++++++----------
 scripts/atomic/gen-atomic-instrumented.sh   |  3 +-
 4 files changed, 43 insertions(+), 18 deletions(-)
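
For illustration (not part of the patch): a sketch of how the new
primitive tightens the usual CAS retry loop. The caller below is
hypothetical -- sketch_add_old(), sketch_add_new() and their arguments
are made-up names, with u32 and the sync_*cmpxchg() macros assumed to
come from the usual kernel headers.

/* Classic retry loop written against sync_cmpxchg(). */
static void sketch_add_old(u32 *ptr, u32 val)
{
	u32 old = *ptr;

	for (;;) {
		u32 seen = sync_cmpxchg(ptr, old, old + val);

		if (seen == old)
			break;		/* CAS succeeded */
		old = seen;		/* lost a race: retry with the new value */
	}
}

/*
 * The same loop using sync_try_cmpxchg(). On failure the primitive
 * writes the observed value back through &old (matching the
 * raw_sync_try_cmpxchg() fallback above), so the explicit comparison
 * and the manual reload both disappear.
 */
static void sketch_add_new(u32 *ptr, u32 val)
{
	u32 old = *ptr;

	while (!sync_try_cmpxchg(ptr, &old, old + val))
		;	/* 'old' already holds the freshly observed value */
}

An architecture overriding arch_sync_try_cmpxchg() can typically feed
the CPU's flags output of its CAS instruction straight into the boolean
result instead of re-comparing the two values, which is where the more
optimal assembly mentioned above comes from.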