@@ -18,6 +18,8 @@ C_O0_I1(r)
 C_O1_I1(r, r)
 C_O1_I2(r, r, rC)
 C_O1_I2(r, r, ri)
+C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rU)
 C_O1_I2(r, r, rW)
 C_O1_I2(r, 0, rZ)
+C_O1_I2(r, rZ, rN)
@@ -640,6 +640,36 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
+    case INDEX_op_add_i32:
+        if (c2) {
+            tcg_out_opc_addi_w(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_add_w(s, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_add_i64:
+        if (c2) {
+            tcg_out_opc_addi_d(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_add_d(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_sub_i32:
+        if (c2) {
+            tcg_out_opc_addi_w(s, a0, a1, -a2);
+        } else {
+            tcg_out_opc_sub_w(s, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_sub_i64:
+        if (c2) {
+            tcg_out_opc_addi_d(s, a0, a1, -a2);
+        } else {
+            tcg_out_opc_sub_d(s, a0, a1, a2);
+        }
+        break;
+
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     default:
@@ -701,6 +731,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);
+    case INDEX_op_add_i32:
+    case INDEX_op_add_i64:
+        return C_O1_I2(r, r, rI);
+
     case INDEX_op_and_i32:
     case INDEX_op_and_i64:
     case INDEX_op_nor_i32:
@@ -723,6 +757,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
         /* Must deposit into the same register as input */
         return C_O1_I2(r, 0, rZ);
+    case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
+        return C_O1_I2(r, rZ, rN);
+
     default:
         g_assert_not_reached();
     }
The neg_i{32,64} ops are fully expressible with sub, so they are omitted
for simplicity.

Signed-off-by: WANG Xuerui <git@xen0n.name>
---
 tcg/loongarch64/tcg-target-con-set.h |  2 ++
 tcg/loongarch64/tcg-target.c.inc     | 38 ++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)
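(Editorial aside, not part of this patch: the neg-via-sub claim relies on the
generic TCG expansion, which falls back to a subtraction from zero whenever a
backend leaves TCG_TARGET_HAS_neg_i32/_i64 at 0; the rZ constraint on sub's
first input then lets that constant zero land in LoongArch's hard-wired $zero
register. A rough sketch of the relevant fallback in tcg/tcg-op.c of that era,
reproduced from memory rather than from this series:

    /* If the backend advertises no native neg op, emit ret = 0 - arg
     * instead; on loongarch64 the constant 0 satisfies rZ and becomes
     * $zero, so the final instruction is a plain sub.w/sub.d.
     */
    void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
    {
        if (TCG_TARGET_HAS_neg_i32) {
            tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
        } else {
            tcg_gen_subfi_i32(ret, 0, arg);
        }
    }

The i64 variant follows the same pattern with the _i64 helpers.)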