diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -40,6 +40,7 @@ DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
DEF(add, 1, 2, 0, TCG_OPF_INT)
+DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(setcond_i32, 1, 2, 1, 0)
DEF(negsetcond_i32, 1, 2, 1, 0)
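[Note: the new entry declares one output, two inputs, zero constant arguments, and flags the opcode TCG_OPF_INT, i.e. valid for any integer type; the per-width and_i32/and_i64 entries are removed in the two hunks that follow. A standalone sketch of the X-macro pattern such DEF lists exist for (illustrative names, not the exact QEMU plumbing):

    /* The same DEF list expands once into an opcode enum and once
     * into a table of operand counts. */
    #define OP_LIST(DEF)      \
        DEF(mov, 1, 1, 0)     \
        DEF(add, 1, 2, 0)     \
        DEF(and, 1, 2, 0)

    #define DEF_ENUM(name, o, i, c) INDEX_op_##name,
    enum { OP_LIST(DEF_ENUM) NB_OPS };
    #undef DEF_ENUM

    #define DEF_ARGS(name, o, i, c) [INDEX_op_##name] = { o, i, c },
    static const struct { int oargs, iargs, cargs; } op_args[NB_OPS] = {
        OP_LIST(DEF_ARGS)
    };
    #undef DEF_ARGS
]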
@@ -62,7 +63,6 @@ DEF(rem_i32, 1, 2, 0, 0)
DEF(remu_i32, 1, 2, 0, 0)
DEF(div2_i32, 2, 3, 0, 0)
DEF(divu2_i32, 2, 3, 0, 0)
-DEF(and_i32, 1, 2, 0, 0)
DEF(or_i32, 1, 2, 0, 0)
DEF(xor_i32, 1, 2, 0, 0)
/* shifts/rotates */
@@ -124,7 +124,6 @@ DEF(rem_i64, 1, 2, 0, 0)
DEF(remu_i64, 1, 2, 0, 0)
DEF(div2_i64, 2, 3, 0, 0)
DEF(divu2_i64, 2, 3, 0, 0)
-DEF(and_i64, 1, 2, 0, 0)
DEF(or_i64, 1, 2, 0, 0)
DEF(xor_i64, 1, 2, 0, 0)
/* shifts/rotates */
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -1943,7 +1943,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
op_opc = INDEX_op_add;
goto do_reg_op;
case 0x2009: /* and Rm,Rn */
- op_opc = INDEX_op_and_i32;
+ op_opc = INDEX_op_and;
goto do_reg_op;
case 0x200a: /* xor Rm,Rn */
op_opc = INDEX_op_xor_i32;
@@ -2105,7 +2105,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_and_i32:
+ case INDEX_op_and:
if (op_dst != st_src) {
goto fail;
}
diff --git a/tcg/optimize.c b/tcg/optimize.c
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -421,7 +421,8 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
CASE_OP_32_64(mul):
return x * y;
- CASE_OP_32_64_VEC(and):
+ case INDEX_op_and:
+ case INDEX_op_and_vec:
return x & y;
CASE_OP_32_64_VEC(or):
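[Note: a single case label now folds AND for all integer widths, with the vector form sharing the body. This is safe because bitwise AND commutes with truncation: (uint32_t)(x & y) equals ((uint32_t)x & (uint32_t)y). A standalone model, assuming the caller truncates 32-bit results as the surrounding (unshown) code does:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model only: fold at 64 bits, truncate afterwards for 32-bit ops. */
    static uint64_t fold_and_const(bool is_32bit, uint64_t x, uint64_t y)
    {
        uint64_t r = x & y;
        return is_32bit ? (uint32_t)r : r;
    }
]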
@@ -787,9 +788,7 @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
/* Expand to AND with a temporary if no backend support. */
if (!TCG_TARGET_HAS_tst) {
- TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
- ? INDEX_op_and_i32 : INDEX_op_and_i64);
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and, 3);
TCGArg tmp = arg_new_temp(ctx);
op2->args[0] = tmp;
@@ -882,8 +881,8 @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
/* Expand to AND with a temporary if no backend support. */
if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
- TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
+ TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and, 3);
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and, 3);
TCGArg t1 = arg_new_temp(ctx);
TCGArg t2 = arg_new_temp(ctx);
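[Note: both expansion paths synthesize the "test" conditions for backends without native support: setcond(TSTEQ, a, b) is setcond(EQ, a & b, 0), and the double-word variant above needs one AND per half, which is why two ops are inserted. A minimal standalone model of the identities:

    #include <stdbool.h>
    #include <stdint.h>

    /* TSTEQ: all tested bits clear; TSTNE: any tested bit set. */
    static bool tsteq(uint64_t a, uint64_t b) { return (a & b) == 0; }
    static bool tstne(uint64_t a, uint64_t b) { return (a & b) != 0; }

    /* Double-word form: AND the halves separately, then compare the
     * combined pair against zero. */
    static bool tsteq2(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh)
    {
        return ((al & bl) | (ah & bh)) == 0;
    }
]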
@@ -1694,8 +1693,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
TempOptInfo *t2 = arg_info(op->args[2]);
int ofs = op->args[3];
int len = op->args[4];
- int width;
- TCGOpcode and_opc;
+ int width = ctx->type == TCG_TYPE_I32 ? 32 : 64;
uint64_t z_mask, s_mask;
if (ti_is_const(t1) && ti_is_const(t2)) {
@@ -1704,24 +1702,11 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
ti_const_val(t2)));
}
- switch (ctx->type) {
- case TCG_TYPE_I32:
- and_opc = INDEX_op_and_i32;
- width = 32;
- break;
- case TCG_TYPE_I64:
- and_opc = INDEX_op_and_i64;
- width = 64;
- break;
- default:
- g_assert_not_reached();
- }
-
/* Inserting a value into zero at offset 0. */
if (ti_is_const_val(t1, 0) && ofs == 0) {
uint64_t mask = MAKE_64BIT_MASK(0, len);
- op->opc = and_opc;
+ op->opc = INDEX_op_and;
op->args[1] = op->args[2];
op->args[2] = arg_new_constant(ctx, mask);
return fold_and(ctx, op);
@@ -1731,7 +1716,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
if (ti_is_const_val(t2, 0)) {
uint64_t mask = deposit64(-1, ofs, len, 0);
- op->opc = and_opc;
+ op->opc = INDEX_op_and;
op->args[2] = arg_new_constant(ctx, mask);
return fold_and(ctx, op);
}
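[Note: both deposit special cases become a plain AND: inserting a value into zero at offset 0 keeps only the low len bits, while inserting zero clears the field and keeps everything else. A standalone model of the two masks (mask64 stands in for MAKE_64BIT_MASK; these are sketches, not the QEMU helpers):

    #include <stdint.h>

    static uint64_t mask64(unsigned len)   /* MAKE_64BIT_MASK(0, len) */
    {
        return len >= 64 ? ~0ull : (1ull << len) - 1;
    }

    /* dep(0, x, ofs=0, len) == x & mask64(len) */
    static uint64_t deposit_into_zero(uint64_t x, unsigned len)
    {
        return x & mask64(len);
    }

    /* dep(x, 0, ofs, len) == x & ~(mask64(len) << ofs) */
    static uint64_t deposit_zero_field(uint64_t x, unsigned ofs, unsigned len)
    {
        return x & ~(mask64(len) << ofs);
    }
]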
@@ -2282,7 +2267,7 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
- TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
+ TCGOpcode sub_opc, xor_opc, neg_opc, shr_opc;
TCGOpcode uext_opc = 0, sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
@@ -2304,7 +2289,6 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
switch (ctx->type) {
case TCG_TYPE_I32:
- and_opc = INDEX_op_and_i32;
sub_opc = INDEX_op_sub_i32;
xor_opc = INDEX_op_xor_i32;
shr_opc = INDEX_op_shr_i32;
@@ -2317,7 +2301,6 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
}
break;
case TCG_TYPE_I64:
- and_opc = INDEX_op_and_i64;
sub_opc = INDEX_op_sub_i64;
xor_opc = INDEX_op_xor_i64;
shr_opc = INDEX_op_shr_i64;
@@ -2356,7 +2339,7 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
op2->args[2] = arg_new_constant(ctx, sh);
src1 = ret;
}
- op->opc = and_opc;
+ op->opc = INDEX_op_and;
op->args[1] = src1;
op->args[2] = arg_new_constant(ctx, 1);
}
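[Note: when the tested mask is a single bit 1 << sh, the whole setcond reduces to shifting that bit down and masking with 1; the code around this hunk (partially shown) handles sign/zero extension and the negated TSTEQ form. The core rewrite as a standalone model:

    #include <stdint.h>

    /* setcond(TSTNE, x, 1ull << k)  ->  (x >> k) & 1 */
    static uint64_t tstne_pow2(uint64_t x, unsigned k)
    {
        return (x >> k) & 1;
    }
]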
@@ -2833,7 +2816,8 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(add2):
done = fold_add2(&ctx, op);
break;
- CASE_OP_32_64_VEC(and):
+ case INDEX_op_and:
+ case INDEX_op_and_vec:
done = fold_and(&ctx, op);
break;
CASE_OP_32_64_VEC(andc):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -401,7 +401,7 @@ void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_and, ret, arg1, arg2);
}
void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -1575,7 +1575,7 @@ void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_and, ret, arg1, arg2);
} else {
tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
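[Note: the 32-bit-host path splits a 64-bit AND into independent ANDs on the low and high halves, which is exact because each result bit of AND depends only on the same bit position of both inputs. A standalone check of the equivalence:

    #include <stdint.h>

    /* Splitting a 64-bit AND into two 32-bit ANDs loses nothing. */
    static uint64_t and64_split(uint64_t a, uint64_t b)
    {
        uint32_t lo = (uint32_t)a & (uint32_t)b;
        uint32_t hi = (uint32_t)(a >> 32) & (uint32_t)(b >> 32);
        return ((uint64_t)hi << 32) | lo;
    }
]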
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -992,8 +992,7 @@ static const TCGOutOp outop_notreached = {
static const TCGOutOp * const all_outop[NB_OPS] = {
[0 ... NB_OPS - 1] = &outop_notreached,
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
- OUTOP(INDEX_op_and_i32, TCGOutOpBinary, outop_and),
- OUTOP(INDEX_op_and_i64, TCGOutOpBinary, outop_and),
+ OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
};
#undef OUTOP
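[Note: the [0 ... NB_OPS - 1] initializer is a GCC/Clang range designator: every slot defaults to the "not reached" stub and later designators override individual entries, so one OUTOP line now serves both integer widths where two were needed before. The pattern in miniature:

    /* Range designator: default everything, then override one slot. */
    static const int dispatch[8] = {
        [0 ... 7] = -1,   /* fallback */
        [3]       = 42,   /* specific entry wins */
    };
]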
@@ -2208,6 +2207,7 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_qemu_ldst_i128;
case INDEX_op_add:
+ case INDEX_op_and:
case INDEX_op_mov:
return has_type;
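[Note: grouped with mov and add, the type-agnostic and is supported whenever the requested integer type itself is available; has_type is computed earlier in the function and not shown here. A hedged standalone model of what such a check amounts to, assuming I64 ops require 64-bit host registers:

    #include <stdbool.h>

    /* Sketch: an I32 op is always available; an I64 op needs 64-bit regs. */
    static bool has_int_type(int reg_bits, int type_bits)
    {
        return type_bits == 32 || reg_bits == 64;
    }
]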
@@ -2225,7 +2225,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i32:
case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
- case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
case INDEX_op_shl_i32:
@@ -2308,7 +2307,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i64:
case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
- case INDEX_op_and_i64:
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
case INDEX_op_shl_i64:
@@ -5429,8 +5427,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;
case INDEX_op_add:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
+ case INDEX_op_and:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
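[Note: the allocator reaches the shared outop_and description through the all_outop table, which stores pointers to the common base member; container_of steps back from that member to the enclosing TCGOutOpBinary so its emit hook can be called. The standard idiom, for reference:

    #include <stddef.h>

    /* Step back from a pointer to a member to its enclosing struct. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
]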
diff --git a/tcg/tci.c b/tcg/tci.c
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -545,7 +545,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] * regs[r2];
break;
- CASE_32_64(and)
+ case INDEX_op_and:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] & regs[r2];
break;
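[Note: the interpreter previously needed the CASE_32_64(and) macro to match both opcode variants; a single case now decodes three register operands and performs the AND at host register width. A toy standalone model of this dispatch style:

    #include <stdint.h>

    /* Three-register ALU dispatch in the TCI style (model only). */
    enum { OP_AND, OP_OR };

    static void exec_rrr(uint64_t *regs, int op, int r0, int r1, int r2)
    {
        switch (op) {
        case OP_AND: regs[r0] = regs[r1] & regs[r2]; break;
        case OP_OR:  regs[r0] = regs[r1] | regs[r2]; break;
        }
    }
]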
@@ -1139,12 +1139,11 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
break;
case INDEX_op_add:
+ case INDEX_op_and:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
case INDEX_op_or_i32:
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -673,7 +673,7 @@ static const TCGOutOpBinary outop_add = {
static void tgen_and(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_and_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}
static const TCGOutOpBinary outop_and = {
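[Note: the removed glue() pasted the host register width onto the opcode name, selecting INDEX_op_and_i32 or INDEX_op_and_i64 at build time; with one width-independent opcode the backend emits INDEX_op_and directly. For reference, the token-pasting idiom it replaces (QEMU's glue is defined along these two-level lines):

    /* glue(INDEX_op_and_i, 32) -> INDEX_op_and_i32 */
    #define xglue(a, b) a##b
    #define glue(a, b)  xglue(a, b)
]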
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 target/sh4/translate.c   |  4 ++--
 tcg/optimize.c           | 40 ++++++++++++----------------------
 tcg/tcg-op.c             |  4 ++--
 tcg/tcg.c                |  9 +++------
 tcg/tci.c                |  5 ++---
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 23 insertions(+), 44 deletions(-)