@@ -47,6 +47,7 @@ DEF(nand, 1, 2, 0, TCG_OPF_INT)
DEF(nor, 1, 2, 0, TCG_OPF_INT)
DEF(or, 1, 2, 0, TCG_OPF_INT)
DEF(orc, 1, 2, 0, TCG_OPF_INT)
+DEF(sub, 1, 2, 0, TCG_OPF_INT)
DEF(xor, 1, 2, 0, TCG_OPF_INT)

DEF(setcond_i32, 1, 2, 1, 0)
@@ -62,7 +63,6 @@ DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
-DEF(sub_i32, 1, 2, 0, 0)
DEF(mul_i32, 1, 2, 0, 0)
DEF(div_i32, 1, 2, 0, 0)
DEF(divu_i32, 1, 2, 0, 0)
@@ -116,7 +116,6 @@ DEF(st16_i64, 0, 2, 1, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* arith */
-DEF(sub_i64, 1, 2, 0, 0)
DEF(mul_i64, 1, 2, 0, 0)
DEF(div_i64, 1, 2, 0, 0)
DEF(divu_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -415,7 +415,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
case INDEX_op_add:
return x + y;

- CASE_OP_32_64(sub):
+ case INDEX_op_sub:
return x - y;

CASE_OP_32_64(mul):
@@ -3058,7 +3058,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(sextract):
done = fold_sextract(&ctx, op);
break;
- CASE_OP_32_64(sub):
+ case INDEX_op_sub:
done = fold_sub(&ctx, op);
break;
case INDEX_op_sub_vec:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -377,7 +377,7 @@ void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_sub, ret, arg1, arg2);
}

void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
@@ -1565,7 +1565,7 @@ void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_sub, ret, arg1, arg2);
} else {
tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1007,8 +1007,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
- OUTOP(INDEX_op_sub_i32, TCGOutOpSubtract, outop_sub),
- OUTOP(INDEX_op_sub_i64, TCGOutOpSubtract, outop_sub),
+ OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
};

@@ -4011,10 +4010,8 @@ liveness_pass_1(TCGContext *s)
opc_new = INDEX_op_add;
goto do_addsub2;
case INDEX_op_sub2_i32:
- opc_new = INDEX_op_sub_i32;
- goto do_addsub2;
case INDEX_op_sub2_i64:
- opc_new = INDEX_op_sub_i64;
+ opc_new = INDEX_op_sub;
do_addsub2:
nb_iargs = 4;
nb_oargs = 2;
@@ -5441,8 +5438,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
+ case INDEX_op_sub:
{
const TCGOutOpSubtract *out = &outop_sub;

diff --git a/tcg/tci.c b/tcg/tci.c
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -537,7 +537,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] + regs[r2];
break;
- CASE_32_64(sub)
+ case INDEX_op_sub:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] - regs[r2];
break;
@@ -1136,9 +1136,8 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_nor:
case INDEX_op_or:
case INDEX_op_orc:
+ case INDEX_op_sub:
case INDEX_op_xor:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
case INDEX_op_div_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -734,7 +734,7 @@ static const TCGOutOpBinary outop_orc = {
static void tgen_sub(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_sub_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           |  4 ++--
 tcg/tcg-op.c             |  4 ++--
 tcg/tcg.c                | 10 +++-------
 tcg/tci.c                |  5 ++---
 tcg/tci/tcg-target.c.inc |  2 +-
 6 files changed, 11 insertions(+), 17 deletions(-)
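As background for readers outside TCG: the point of the conversion is that the operand width of a subtract is now carried by the op's type (TCG_TYPE_I32 vs TCG_TYPE_I64) rather than by the opcode name, which is why a single INDEX_op_sub case can replace each sub_i32/sub_i64 pair in the hunks above. A minimal standalone sketch of that idea follows; it is illustrative only, the names Opcode, OpType and fold_sub are invented for the example, and it is not QEMU code.

#include <stdint.h>
#include <stdio.h>

typedef enum { OP_SUB } Opcode;              /* one width-agnostic opcode   */
typedef enum { TYPE_I32, TYPE_I64 } OpType;  /* width stored beside the op  */

/* Fold a constant subtract; the arithmetic is shared, only the final
 * truncation depends on the declared width of the op. */
static uint64_t fold_sub(Opcode op, OpType type, uint64_t x, uint64_t y)
{
    uint64_t r = x - y;
    (void)op;
    return type == TYPE_I32 ? (uint32_t)r : r;
}

int main(void)
{
    /* 1 - 2 wraps differently depending on the declared operand width. */
    printf("i32: %#llx\n", (unsigned long long)fold_sub(OP_SUB, TYPE_I32, 1, 2));
    printf("i64: %#llx\n", (unsigned long long)fold_sub(OP_SUB, TYPE_I64, 1, 2));
    return 0;
}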