@@ -33,5 +33,5 @@ C_O1_I2(w, w, w)
C_O1_I2(w, w, wM)
C_O1_I2(w, w, wA)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, rz, rJ, rz, rz)
+C_O1_I4(r, r, rJ, rz, rz)
C_N2_I1(r, r, r)
@@ -23,7 +23,8 @@ C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rz)
C_O1_I2(r, r, rzW)
-C_O1_I4(r, rz, rz, rz, 0)
+C_O1_I4(r, r, rz, rz, 0)
+C_O1_I4(r, r, rz, rz, rz)
C_O1_I4(r, rz, rz, rz, rz)
C_O2_I1(r, r, r)
C_O2_I2(r, r, r, r)
@@ -38,7 +38,6 @@ C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
C_O1_I4(v, v, v, vZ, v)
C_O1_I4(v, v, v, vZM, v)
-C_O1_I4(r, r, ri, rI, r)
C_O1_I4(r, r, rC, rI, r)
C_O2_I1(o, m, r)
C_O2_I2(o, m, 0, r)
@@ -15,6 +15,6 @@ C_O0_I2(r, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rJ)
-C_O1_I4(r, rz, rJ, rI, 0)
+C_O1_I4(r, r, rJ, rI, 0)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, rz, rz, rJ, rJ)
@@ -1000,6 +1000,13 @@ typedef struct TCGOutOpDivRem {
TCGReg a0, TCGReg a1, TCGReg a4);
} TCGOutOpDivRem;
+typedef struct TCGOutOpMovcond {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+                TCGArg vt, bool const_vt, TCGArg vf, bool const_vf);
+} TCGOutOpMovcond;
+
typedef struct TCGOutOpMul2 {
TCGOutOp base;
void (*out_rrrr)(TCGContext *s, TCGType type,
@@ -1057,6 +1064,8 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_movcond_i32, TCGOutOpMovcond, outop_movcond),
+ OUTOP(INDEX_op_movcond_i64, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -5492,6 +5501,20 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ {
+ const TCGOutOpMovcond *out = &outop_movcond;
+ TCGCond cond = new_args[5];
+
+ tcg_debug_assert(!const_args[1]);
+ out->out(s, type, cond, new_args[0],
+ new_args[1], new_args[2], const_args[2],
+ new_args[3], const_args[3],
+ new_args[4], const_args[4]);
+ }
+ break;
+
case INDEX_op_setcond:
case INDEX_op_negsetcond:
{
@@ -438,11 +438,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_l(insn, tb_ptr, &ptr);
tb_ptr = ptr;
continue;
- case INDEX_op_movcond_i32:
- tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
- tmp32 = tci_compare32(regs[r1], regs[r2], condition);
- regs[r0] = regs[tmp32 ? r3 : r4];
- break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
@@ -628,6 +623,11 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
break;
+ case INDEX_op_tci_movcond32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare32(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
+ break;
/* Shift/rotate operations. */
@@ -1074,7 +1074,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1), pos, len);
break;
- case INDEX_op_movcond_i32:
+ case INDEX_op_tci_movcond32:
case INDEX_op_movcond_i64:
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
@@ -2513,6 +2513,19 @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rri = tgen_negsetcondi,
};
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
+{
+ tcg_out_cmp(s, type, cond, c1, c2, const_c2);
+ tcg_out_insn(s, 3506, CSEL, type, ret, vt, vf, cond);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rz, rz),
+ .out = tgen_movcond,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2521,7 +2534,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
TCGArg a0 = args[0];
TCGArg a1 = args[1];
TCGArg a2 = args[2];
- int c2 = const_args[2];
switch (opc) {
case INDEX_op_goto_ptr:
@@ -2579,14 +2591,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
break;
- case INDEX_op_movcond_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_movcond_i64:
- tcg_out_cmp(s, ext, args[5], a1, a2, c2);
- tcg_out_insn(s, 3506, CSEL, ext, a0, args[3], args[4], args[5]);
- break;
-
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, ext);
@@ -3166,10 +3170,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rC, rz, rz);
-
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);
@@ -2253,6 +2253,23 @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rri = tgen_negsetcondi,
};
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
+{
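+    /* The "0" constraint means vf is always in the same register as ret,
+     * so we need only do "if condition passed, move vt to ret".
+     */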
+ cond = tcg_out_cmp(s, cond, c1, c2, const_c2);
+ tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[cond], ARITH_MOV, ARITH_MVN,
+ ret, 0, vt, const_vt);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rIN, rIK, 0),
+ .out = tgen_movcond,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -2294,14 +2308,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
break;
- case INDEX_op_movcond_i32:
- /* Constraints mean that v2 is always in the same register as dest,
- * so we only need to do "if condition passed, move v1 to dest".
- */
- c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
- tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
- ARITH_MVN, args[0], 0, args[3], const_args[3]);
- break;
case INDEX_op_add2_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
a3 = args[3], a4 = args[4], a5 = args[5];
@@ -2446,8 +2452,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
return C_O1_I2(r, 0, rZ);
case INDEX_op_extract2_i32:
return C_O1_I2(r, rZ, rZ);
- case INDEX_op_movcond_i32:
- return C_O1_I4(r, r, rIN, rIK, 0);
case INDEX_op_add2_i32:
return C_O2_I4(r, r, r, r, rIN, rIK);
case INDEX_op_sub2_i32:
@@ -1900,14 +1900,21 @@ static void tcg_out_cmov(TCGContext *s, int jcc, int rexw,
tcg_out_modrm(s, OPC_CMOVCC | jcc | rexw, dest, v1);
}
-static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
- TCGReg dest, TCGReg c1, TCGArg c2, int const_c2,
- TCGReg v1)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt,
+                         TCGArg vf, bool const_vf)
{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw);
- tcg_out_cmov(s, jcc, rexw, dest, v1);
+ tcg_out_cmov(s, jcc, rexw, dest, vt);
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, reT, r, 0),
+ .out = tgen_movcond,
+};
+
static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
{
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
@@ -3144,10 +3151,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;
- OP_32_64(movcond):
- tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
- break;
-
OP_32_64(bswap16):
if (a2 & TCG_BSWAP_OS) {
/* Output must be sign-extended. */
@@ -3977,10 +3980,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_deposit_i64:
return C_O1_I2(q, 0, qi);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, reT, r, 0);
-
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
@@ -711,11 +711,11 @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rri = tgen_negsetcondi,
};
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, tcg_target_long c2, bool const2,
- TCGReg v1, TCGReg v2)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
{
- int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
+ int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const_c2);
TCGReg t;
/* Standardize the test below to t != 0. */
@@ -735,6 +735,11 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rJ, rz, rz),
+ .out = tgen_movcond,
+};
+
/*
* Branch helpers
*/
@@ -1747,7 +1752,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a1 = args[1];
TCGArg a2 = args[2];
TCGArg a3 = args[3];
- int c2 = const_args[2];
switch (opc) {
case INDEX_op_mb:
@@ -1837,11 +1841,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_revb_d(s, a0, a1);
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
- break;
-
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
@@ -2469,10 +2468,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
/* Must deposit into the same register as input */
return C_O1_I2(r, 0, rz);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, rz, rJ, rz, rz);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
case INDEX_op_dup_vec:
@@ -1086,8 +1086,9 @@ static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
tcg_out_nop(s);
}
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
{
int tmpflags;
bool eqz;
@@ -1133,6 +1134,13 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = (use_mips32r6_instructions
+ ? C_O1_I4(r, r, rz, rz, rz)
+ : C_O1_I4(r, r, rz, rz, 0)),
+ .out = tgen_movcond,
+};
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
/*
@@ -1726,7 +1734,8 @@ static void tgen_clz(TCGContext *s, TCGType type,
if (use_mips32r6_instructions) {
MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6;
tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
- tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
+        tgen_movcond(s, TCG_TYPE_REG, TCG_COND_EQ, a0, a1, TCG_REG_ZERO, false,
+                     a2, false, TCG_TMP0, false);
} else {
MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ;
if (a0 == a2) {
@@ -2298,11 +2307,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
- break;
-
case INDEX_op_setcond2_i32:
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
break;
@@ -2398,11 +2402,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return C_O1_I2(r, 0, rz);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return (use_mips32r6_instructions
- ? C_O1_I4(r, rz, rz, rz, rz)
- : C_O1_I4(r, rz, rz, rz, 0));
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return C_O2_I4(r, r, rz, rz, rN, rN);
@@ -2144,9 +2144,9 @@ static const TCGOutOpBrcond outop_brcond = {
.out_ri = tgen_brcondi,
};
-static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
- TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
- TCGArg v2, bool const_c2)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
{
/* If for some reason both inputs are zero, don't produce bad code. */
if (v1 == 0 && v2 == 0) {
@@ -2192,6 +2192,11 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rZ, rZ),
+ .out = tgen_movcond,
+};
+
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
{
@@ -3578,15 +3583,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;
- case INDEX_op_movcond_i32:
- tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
- args[3], args[4], const_args[2]);
- break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
- args[3], args[4], const_args[2]);
- break;
-
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_add2_i64:
#else
@@ -4297,10 +4293,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rC, rZ, rZ);
-
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return C_O1_I2(r, 0, rZ);
@@ -1501,10 +1501,10 @@ static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
}
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg cmp1, int cmp2, bool c_cmp2,
- TCGReg val1, bool c_val1,
- TCGReg val2, bool c_val2)
+static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg cmp1, TCGArg cmp2, bool c_cmp2,
+ TCGArg val1, bool c_val1,
+ TCGArg val2, bool c_val2)
{
int tmpflags;
TCGReg t;
@@ -1531,6 +1531,11 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rI, rM, rM),
+ .out = tcg_out_movcond,
+};
+
static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
TCGReg ret, TCGReg src1, int src2, bool c_src2)
{
@@ -1542,7 +1547,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
* Note that constraints put 'ret' in a new register, so the
* computation above did not clobber either 'src1' or 'src2'.
*/
- tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
+ tcg_out_movcond(s, type, TCG_COND_EQ, ret, src1, 0, true,
src2, c_src2, ret, false);
}
}
@@ -2425,7 +2430,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a0 = args[0];
TCGArg a1 = args[1];
TCGArg a2 = args[2];
- int c2 = const_args[2];
switch (opc) {
case INDEX_op_goto_ptr:
@@ -2521,12 +2525,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const_args[4], const_args[5], true, false);
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], a0, a1, a2, c2,
- args[3], const_args[3], args[4], const_args[4]);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
@@ -2863,10 +2861,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rI, rM, rM);
-
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
@@ -1540,9 +1540,9 @@ static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
}
-static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
- TCGReg c1, TCGArg c2, int c2const,
- TCGArg v3, int v3const, TCGReg v4)
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg dest, TCGReg c1, TCGArg c2, bool c2const,
+ TCGArg v3, bool v3const, TCGArg v4, bool v4const)
{
int cc, inv_cc;
@@ -1550,6 +1550,11 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rI, r),
+ .out = tgen_movcond,
+};
+
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
int ofs, int len, int z)
{
@@ -2875,11 +2880,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
break;
- case INDEX_op_movcond_i32:
- tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
- args[2], const_args[2], args[3], const_args[3], args[4]);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
break;
@@ -2948,11 +2948,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
break;
- case INDEX_op_movcond_i64:
- tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
- args[2], const_args[2], args[3], const_args[3], args[4]);
- break;
-
OP_32_64(deposit):
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[1]) {
@@ -3492,11 +3487,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_deposit_i64:
return C_O1_I2(r, rZ, r);
- case INDEX_op_movcond_i32:
- return C_O1_I4(r, r, ri, rI, r);
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rC, rI, r);
-
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return C_N1_O1_I4(r, r, 0, 1, ri, r);
@@ -898,6 +898,22 @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rri = tgen_negsetcondi,
};
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool c2const,
+                         TCGArg v1, bool v1const, TCGArg v2, bool v2const)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_movcond_i32(s, cond, ret, c1, c2, c2const, v1, v1const);
+ } else {
+ tcg_out_movcond_i64(s, cond, ret, c1, c2, c2const, v1, v1const);
+ }
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rJ, rI, 0),
+ .out = tgen_movcond,
+};
+
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
TCGReg al, TCGReg ah, int32_t bl, int blconst,
int32_t bh, int bhconst, int opl, int oph)
@@ -1735,13 +1751,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2;
- int c2;
/* Hoist the loads of the most common arguments. */
a0 = args[0];
a1 = args[1];
a2 = args[2];
- c2 = const_args[2];
switch (opc) {
case INDEX_op_goto_ptr:
@@ -1784,10 +1798,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, a0, a1, a2, STW);
break;
- case INDEX_op_movcond_i32:
- tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
- break;
-
case INDEX_op_add2_i32:
tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
args[4], const_args[4], args[5], const_args[5],
@@ -1822,9 +1832,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, a0, a1, a2, STX);
break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
- break;
case INDEX_op_add2_i64:
tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
const_args[4], args[5], const_args[5], false);
@@ -1895,9 +1902,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, rz, rJ, rI, 0);
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
@@ -11,3 +11,4 @@ DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_rotl32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_rotr32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_setcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_movcond32, 1, 4, 1, TCG_OPF_NOT_PRESENT)
@@ -92,8 +92,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
return C_O0_I4(r, r, r, r);
#endif
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
case INDEX_op_setcond2_i32:
return C_O1_I4(r, r, r, r, r);
@@ -972,6 +970,21 @@ static const TCGOutOpBrcond outop_brcond = {
.out_rr = tgen_brcond,
};
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_movcond32
+ : INDEX_op_movcond_i64);
+ tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, r, r, r),
+ .out = tgen_movcond,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -987,7 +1000,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_l(s, opc, arg_label(args[0]));
break;
- CASE_32_64(movcond)
case INDEX_op_setcond2_i32:
tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h |  2 +-
 tcg/mips/tcg-target-con-set.h        |  3 ++-
 tcg/s390x/tcg-target-con-set.h       |  1 -
 tcg/sparc64/tcg-target-con-set.h     |  2 +-
 tcg/tcg.c                            | 23 +++++++++++++++++++++++
 tcg/tci.c                            | 12 ++++++------
 tcg/aarch64/tcg-target.c.inc         | 26 +++++++++++++-------------
 tcg/arm/tcg-target.c.inc             | 24 ++++++++++++++----------
 tcg/i386/tcg-target.c.inc            | 23 +++++++++++------------
 tcg/loongarch64/tcg-target.c.inc     | 23 +++++++++--------------
 tcg/mips/tcg-target.c.inc            | 25 ++++++++++++-------------
 tcg/ppc/tcg-target.c.inc             | 24 ++++++++----------------
 tcg/riscv/tcg-target.c.inc           | 26 ++++++++++----------------
 tcg/s390x/tcg-target.c.inc           | 26 ++++++++------------------
 tcg/sparc64/tcg-target.c.inc         | 28 ++++++++++++++++------------
 tcg/tci/tcg-target-opc.h.inc         |  1 +
 tcg/tci/tcg-target.c.inc             | 18 +++++++++++++++---
 17 files changed, 150 insertions(+), 137 deletions(-)
Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- tcg/loongarch64/tcg-target-con-set.h | 2 +- tcg/mips/tcg-target-con-set.h | 3 ++- tcg/s390x/tcg-target-con-set.h | 1 - tcg/sparc64/tcg-target-con-set.h | 2 +- tcg/tcg.c | 23 +++++++++++++++++++++++ tcg/tci.c | 12 ++++++------ tcg/aarch64/tcg-target.c.inc | 26 +++++++++++++------------- tcg/arm/tcg-target.c.inc | 24 ++++++++++++++---------- tcg/i386/tcg-target.c.inc | 23 +++++++++++------------ tcg/loongarch64/tcg-target.c.inc | 23 +++++++++-------------- tcg/mips/tcg-target.c.inc | 25 ++++++++++++------------- tcg/ppc/tcg-target.c.inc | 24 ++++++++---------------- tcg/riscv/tcg-target.c.inc | 26 ++++++++++---------------- tcg/s390x/tcg-target.c.inc | 26 ++++++++------------------ tcg/sparc64/tcg-target.c.inc | 28 ++++++++++++++++------------ tcg/tci/tcg-target-opc.h.inc | 1 + tcg/tci/tcg-target.c.inc | 18 +++++++++++++++--- 17 files changed, 150 insertions(+), 137 deletions(-)