
[RFC,08/11] target/ppc: implement address swizzle for gen_ld_atomic()

Message ID: 20241212151412.570454-9-mark.cave-ayland@ilande.co.uk
State: New
Series: target/ppc: implement legacy address-swizzling MSR_LE support

Commit Message

Mark Cave-Ayland Dec. 12, 2024, 3:14 p.m. UTC
The gen_ld_atomic() function uses a number of TCG atomic primitives in its
implementation. Update gen_ld_atomic() so that it applies the address
swizzle where required.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
---
 target/ppc/translate.c | 81 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 72 insertions(+), 9 deletions(-)
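
Note: need_addrswizzle_le() and gen_addr_swizzle_le() are introduced earlier
in this series, so their bodies do not appear in this patch. As a rough
sketch (an assumption based only on the call sites below and the classic
PowerPC little-endian storage model), the helper is expected to XOR the low
address bits by the access size:

    /*
     * Assumed shape of the helper from earlier in the series: XOR the low
     * address bits by the access size, as in the legacy MSR_LE storage
     * model. 1-byte accesses XOR with 7, 2-byte with 6, 4-byte with 4;
     * 8-byte accesses are unchanged (8 - 8 == 0).
     */
    static void gen_addr_swizzle_le(TCGv t, TCGv EA, MemOp memop)
    {
        tcg_gen_xori_tl(t, EA, 8 - memop_size(memop));
    }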

Comments

Richard Henderson Dec. 12, 2024, 4:20 p.m. UTC | #1
On 12/12/24 09:14, Mark Cave-Ayland wrote:
> The gen_ld_atomic() function uses a number of TCG atomic primitives in its
> implementation. Update gen_ld_atomic() so that it applies the address
> swizzle where required.
> 
> Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
> ---
>   target/ppc/translate.c | 81 +++++++++++++++++++++++++++++++++++++-----
>   1 file changed, 72 insertions(+), 9 deletions(-)

These are all Power ISA 3.0 instructions.
There will be no swizzling.

r~

Patch

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 74aa398f25..b549525bb6 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3039,31 +3039,94 @@  static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
     memop |= MO_ALIGN;
     switch (gpr_FC) {
     case 0: /* Fetch and add */
-        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_add_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 1: /* Fetch and xor */
-        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_xor_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 2: /* Fetch and or */
-        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_or_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 3: /* Fetch and 'and' */
-        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_and_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 4:  /* Fetch and max unsigned */
-        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_umax_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 5:  /* Fetch and max signed */
-        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_smax_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 6:  /* Fetch and min unsigned */
-        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_umin_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 7:  /* Fetch and min signed */
-        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_smin_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 8: /* Swap */
-        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_xchg_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
 
     case 16: /* Compare and swap not equal */
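
Every case of the switch above repeats the same if/else swizzle pattern. A
possible simplification (a sketch only, not part of the posted patch) is to
apply the swizzle once before the switch, so that every case operates on the
adjusted address:

    /* Sketch: hoist the swizzle out of the switch */
    memop |= MO_ALIGN;
    if (need_addrswizzle_le(ctx)) {
        TCGv ta = tcg_temp_new();

        gen_addr_swizzle_le(ta, EA, memop);
        EA = ta;   /* every case below now uses the swizzled address */
    }
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    /* ... remaining cases unchanged, minus the per-case if/else ... */
    }

That said, per the review comment above these are all Power ISA 3.0
instructions, and the legacy address-swizzling MSR_LE mode predates ISA 3.0,
so the swizzle path should never be reachable here and the simplest
resolution may be to drop it from gen_ld_atomic() entirely.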