diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -24,7 +24,6 @@ C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rWZ)
-C_O1_I2(r, rZ, rN)
C_O1_I2(r, rZ, rZ)
C_O1_I4(r, rZ, rZ, rZ, 0)
C_O1_I4(r, rZ, rZ, rZ, rZ)
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -22,8 +22,7 @@ C_O1_I1(v, r)
C_O1_I1(v, v)
C_O1_I1(v, vr)
C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rI, ri)
-C_O1_I2(r, rI, rT)
+C_O1_I2(r, rI, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rC)
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -16,7 +16,6 @@ C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
-C_O1_I2(r, rZ, rN)
C_O1_I2(r, rZ, rZ)
C_N1_I2(r, r, rM)
C_O1_I4(r, r, rI, rM, rM)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -18,6 +18,5 @@ REGS('v', ALL_VECTOR_REGS)
CONST('I', TCG_CT_CONST_S12)
CONST('K', TCG_CT_CONST_S5)
CONST('L', TCG_CT_CONST_CMP_VI)
-CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -970,6 +970,15 @@ typedef struct TCGOutOpBinary {
TCGReg a0, TCGReg a1, tcg_target_long a2);
} TCGOutOpBinary;
+typedef struct TCGOutOpSubtract {
+ TCGOutOp base;
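+ /* out_rrr: a0 = a1 - a2;  out_rir: a0 = a1 (constant) - a2. */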
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rir)(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2);
+} TCGOutOpSubtract;
+
#include "tcg-target.c.inc"
#ifndef CONFIG_TCG_INTERPRETER
@@ -999,6 +1007,8 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
+ OUTOP(INDEX_op_sub_i32, TCGOutOpSubtract, outop_sub),
+ OUTOP(INDEX_op_sub_i64, TCGOutOpSubtract, outop_sub),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
};
@@ -2231,7 +2241,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_sub_i32:
case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
case INDEX_op_shl_i32:
@@ -2301,7 +2310,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
- case INDEX_op_sub_i64:
case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
case INDEX_op_shl_i64:
@@ -5433,6 +5441,24 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ {
+ const TCGOutOpSubtract *out = &outop_sub;
+
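+ /*
+  * The second source operand is never constant here: generic code
+  * folds subtraction of a constant into an add of the negation.
+  */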
+ tcg_debug_assert(!const_args[2]);
+ if (const_args[1]) {
+ out->out_rir(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
default:
if (def->flags & TCG_OPF_VECTOR) {
tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -2205,6 +2205,17 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, SUB, type, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -2294,15 +2305,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3);
break;
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- if (c2) {
- tgen_addi(s, ext, a0, a1, -a2);
- } else {
- tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i64:
case INDEX_op_neg_i32:
tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
@@ -3026,10 +3028,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rZ, r);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, r, rA);
-
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
case INDEX_op_negsetcond_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1941,6 +1941,25 @@ static const TCGOutOpBinary outop_orc = {
.base.static_constraint = C_NotImplemented,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subfi(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
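+ /* RSB (reverse subtract) computes a0 = a1 (immediate) - a2. */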
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, rI, r),
+ .out_rrr = tgen_sub,
+ .out_rir = tgen_subfi,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -2008,15 +2026,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
ARITH_MVN, args[0], 0, args[3], const_args[3]);
break;
- case INDEX_op_sub_i32:
- if (const_args[1]) {
- tcg_out_dat_imm(s, COND_AL, ARITH_RSB,
- args[0], args[2], encode_imm_nofail(args[1]));
- } else {
- tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
- args[0], args[1], args[2], const_args[2]);
- }
- break;
case INDEX_op_add2_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
a3 = args[3], a4 = args[4], a5 = args[5];
@@ -2277,8 +2286,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond_i32:
case INDEX_op_negsetcond_i32:
return C_O1_I2(r, r, rIN);
- case INDEX_op_sub_i32:
- return C_O1_I2(r, rI, r);
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2734,6 +2734,19 @@ static const TCGOutOpBinary outop_orc = {
.base.static_constraint = C_NotImplemented,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
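+ /* The matching constraint ties a0 to a1, so subtract a2 in place. */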
+ tgen_arithr(s, ARITH_SUB + rexw, a0, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -2836,15 +2848,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;
- OP_32_64(sub):
- c = ARITH_SUB;
- if (const_a2) {
- tgen_arithi(s, c + rexw, a0, a2, 0);
- } else {
- tgen_arithr(s, c + rexw, a0, a2);
- }
- break;
-
OP_32_64(mul):
if (const_a2) {
int32_t val;
@@ -3784,8 +3787,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(re, r);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
return C_O1_I2(r, 0, re);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1371,6 +1371,21 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sub_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_sub_d(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -1591,21 +1606,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;
- case INDEX_op_sub_i32:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
- } else {
- tcg_out_opc_sub_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
- } else {
- tcg_out_opc_sub_d(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i32:
tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
break;
@@ -2337,10 +2337,8 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
/* Must deposit into the same register as input */
return C_O1_I2(r, 0, rZ);
- case INDEX_op_sub_i32:
case INDEX_op_setcond_i32:
return C_O1_I2(r, rZ, ri);
- case INDEX_op_sub_i64:
case INDEX_op_setcond_i64:
return C_O1_I2(r, rZ, rJ);
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -1776,6 +1776,18 @@ static const TCGOutOpBinary outop_orc = {
.base.static_constraint = C_NotImplemented,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SUBU : OPC_DSUBU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -1872,22 +1884,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;
- do_binaryv:
- tcg_out_opc_reg(s, i1, a0, a1, a2);
- break;
-
- case INDEX_op_sub_i32:
- i1 = OPC_SUBU, i2 = OPC_ADDIU;
- goto do_subtract;
- case INDEX_op_sub_i64:
- i1 = OPC_DSUBU, i2 = OPC_DADDIU;
- do_subtract:
- if (c2) {
- tcg_out_opc_imm(s, i2, a0, a1, -a2);
- break;
- }
- goto do_binaryv;
-
case INDEX_op_mul_i32:
if (use_mips32_instructions) {
tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
@@ -2288,9 +2284,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rZ, r);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rZ, rN);
case INDEX_op_mul_i32:
case INDEX_op_mulsh_i32:
case INDEX_op_muluh_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -3054,6 +3054,25 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBF | TAB(a0, a2, a1));
+}
+
+static void tgen_subfi(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
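+ /* SUBFIC computes a0 = a1 (immediate) - a2. */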
+ tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, rI, r),
+ .out_rrr = tgen_sub,
+ .out_rir = tgen_subfi,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -3142,21 +3160,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- tgen_addi(s, type, a0, a1, (int32_t)-a2);
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
-
case INDEX_op_clz_i32:
tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
args[2], const_args[2]);
@@ -3269,21 +3272,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- tgen_addi(s, type, a0, a1, -a2);
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
-
case INDEX_op_shl_i64:
if (const_args[2]) {
/* Limit immediate shift count lest we create an illegal insn. */
@@ -4269,10 +4257,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_muluh_i64:
return C_O1_I2(r, r, r);
- case INDEX_op_sub_i32:
- return C_O1_I2(r, rI, ri);
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rI, rT);
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
case INDEX_op_clz_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -114,10 +114,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_S12 0x200
-#define TCG_CT_CONST_N12 0x400
-#define TCG_CT_CONST_M12 0x800
-#define TCG_CT_CONST_S5 0x1000
-#define TCG_CT_CONST_CMP_VI 0x2000
+#define TCG_CT_CONST_M12 0x400
+#define TCG_CT_CONST_S5 0x800
+#define TCG_CT_CONST_CMP_VI 0x1000
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -403,13 +402,6 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
return 1;
}
- /*
- * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
- * Used for subtraction, where a constant must be handled by ADDI.
- */
- if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
- return 1;
- }
/*
* Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
* Used by addsub2 and movcond, which may need the negative value,
@@ -2058,6 +2050,18 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SUBW : OPC_SUB;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -2139,21 +2143,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
break;
- case INDEX_op_sub_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
- } else {
- tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
- }
- break;
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
- } else {
- tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
- }
- break;
-
case INDEX_op_not_i32:
case INDEX_op_not_i64:
tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
@@ -2714,10 +2703,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, rI);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rZ, rN);
-
case INDEX_op_mul_i32:
case INDEX_op_mulsh_i32:
case INDEX_op_muluh_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -2331,6 +2331,24 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
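+ /* SGRK/SRK are the three-operand forms; SR requires a0 == a1. */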
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, SR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -2413,17 +2430,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tgen_addi(s, type, a0, a1, (int32_t)-a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, SR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i32:
tcg_out_insn(s, RR, LCR, args[0], args[1]);
break;
@@ -2624,15 +2630,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tgen_addi(s, type, a0, a1, -a2);
- } else {
- tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i64:
tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
break;
@@ -3308,10 +3305,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_clz_i64:
return C_O1_I2(r, r, rI);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, r, ri);
-
case INDEX_op_mul_i32:
return (HAVE_FACILITY(MISC_INSN_EXT2)
? C_O1_I2(r, r, ri)
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1377,6 +1377,17 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -1449,9 +1460,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_st32_i64:
tcg_out_ldst(s, a0, a1, a2, STW);
break;
- OP_32_64(sub):
- c = ARITH_SUB;
- goto gen_arith;
case INDEX_op_shl_i32:
c = SHIFT_SLL;
do_shift32:
@@ -1671,8 +1679,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_div_i64:
case INDEX_op_divu_i32:
case INDEX_op_divu_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_shl_i32:
case INDEX_op_shl_i64:
case INDEX_op_shr_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -91,8 +91,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_rem_i64:
case INDEX_op_remu_i32:
case INDEX_op_remu_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
case INDEX_op_shl_i32:
@@ -733,6 +731,17 @@ static const TCGOutOpBinary outop_orc = {
.out_rrr = tgen_orc,
};
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, glue(INDEX_op_sub_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -786,7 +795,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
break;
- CASE_32_64(sub)
CASE_32_64(mul)
CASE_32_64(shl)
CASE_32_64(shr)
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target-con-set.h    |  1 -
 tcg/ppc/tcg-target-con-set.h     |  3 +-
 tcg/riscv/tcg-target-con-set.h   |  1 -
 tcg/riscv/tcg-target-con-str.h   |  1 -
 tcg/tcg.c                        | 26 ++++++++++++++--
 tcg/aarch64/tcg-target.c.inc     | 24 +++++++--------
 tcg/arm/tcg-target.c.inc         | 29 +++++++++++-------
 tcg/i386/tcg-target.c.inc        | 23 +++++++-------
 tcg/loongarch64/tcg-target.c.inc | 32 +++++++++-----------
 tcg/mips/tcg-target.c.inc        | 31 ++++++++-----------
 tcg/ppc/tcg-target.c.inc         | 52 +++++++++++---------------------
 tcg/riscv/tcg-target.c.inc       | 45 +++++++++------------------
 tcg/s390x/tcg-target.c.inc       | 41 +++++++++++--------------
 tcg/sparc64/tcg-target.c.inc     | 16 +++++++---
 tcg/tci/tcg-target.c.inc         | 14 +++++++--
 15 files changed, 165 insertions(+), 174 deletions(-)