@@ -379,3 +379,616 @@ uint64_t helper_fp_logb_d(CPULoongArchState *env, uint64_t fp)
update_fcsr0(env, GETPC());
return fp1;
}
+
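+/* Store bit 0 of src into condition flag cd (cf[0]-cf[7]). */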
+void helper_movreg2cf_i32(CPULoongArchState *env, uint32_t cd, uint32_t src)
+{
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
+}
+
+void helper_movreg2cf_i64(CPULoongArchState *env, uint32_t cd, uint64_t src)
+{
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
+}
+
+/* fcmp.cond.s */
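+/*
+ * FCMP.cond.S helpers.  The C* ("quiet") conditions use the *_quiet
+ * softfloat comparisons, which raise Invalid only for signaling NaNs,
+ * while the S* ("signaling") conditions use the ordinary comparisons,
+ * which raise Invalid for any NaN operand.  CAF/SAF always yield false;
+ * the comparison is still evaluated so that NaN operands update the
+ * exception flags.  COR/SOR (ordered) and CNE/SNE (ordered not-equal)
+ * are built from two directed comparisons.  Each helper returns all
+ * ones when the condition holds and zero otherwise.
+ */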
+uint32_t helper_fp_cmp_caf_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = (float32_unordered_quiet(fp1, fp, &env->active_fpu.fp_status), 0);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cun_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered_quiet(fp1, fp, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_ceq_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_eq_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cueq_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_eq_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_clt_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cult_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cle_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_le_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cule_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_le_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cne_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_lt_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cor_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_le_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_le_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_cune_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_saf_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = (float32_unordered(fp1, fp, &env->active_fpu.fp_status), 0);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sun_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered(fp1, fp, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_seq_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_eq(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sueq_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_eq(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_slt_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sult_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sle_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_le(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sule_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_le(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sne_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_lt(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sor_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_le(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_le(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t helper_fp_cmp_sune_s(CPULoongArchState *env, uint32_t fp,
+ uint32_t fp1)
+{
+ uint64_t ret;
+ ret = float32_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt(fp1, fp, &env->active_fpu.fp_status) ||
+ float32_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/* fcmp.cond.d */
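+/* Double-precision variants; condition semantics match the helpers above. */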
+uint64_t helper_fp_cmp_caf_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = (float64_unordered_quiet(fp1, fp, &env->active_fpu.fp_status), 0);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cun_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered_quiet(fp1, fp, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_ceq_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_eq_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cueq_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_eq_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_clt_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cult_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cle_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_le_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cule_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_le_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cne_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_lt_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cor_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_le_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_le_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_cune_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt_quiet(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt_quiet(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_saf_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = (float64_unordered(fp1, fp, &env->active_fpu.fp_status), 0);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sun_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered(fp1, fp, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_seq_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_eq(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sueq_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_eq(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_slt_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sult_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sle_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_le(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sule_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_le(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sne_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_lt(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sor_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_le(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_le(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_fp_cmp_sune_d(CPULoongArchState *env, uint64_t fp,
+ uint64_t fp1)
+{
+ uint64_t ret;
+ ret = float64_unordered(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt(fp1, fp, &env->active_fpu.fp_status) ||
+ float64_lt(fp, fp1, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ if (ret) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
@@ -72,3 +72,52 @@ DEF_HELPER_2(fp_recip_d, i64, env, i64)
DEF_HELPER_FLAGS_2(fp_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32)
DEF_HELPER_FLAGS_2(fp_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+/* fcmp.cond.s/d */
+DEF_HELPER_3(fp_cmp_caf_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_caf_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cun_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cun_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_ceq_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_ceq_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cueq_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cueq_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_clt_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_clt_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cult_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cult_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cle_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cle_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cule_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cule_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cne_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cne_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cor_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cor_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_cune_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_cune_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_saf_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_saf_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sun_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sun_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_seq_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_seq_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sueq_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sueq_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_slt_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_slt_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sult_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sult_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sle_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sle_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sule_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sule_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sne_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sne_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sor_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sor_s, i32, env, i32, i32)
+DEF_HELPER_3(fp_cmp_sune_d, i64, env, i64, i64)
+DEF_HELPER_3(fp_cmp_sune_s, i32, env, i32, i32)
+
+DEF_HELPER_3(movreg2cf_i32, void, env, i32, i32)
+DEF_HELPER_3(movreg2cf_i64, void, env, i32, i64)
@@ -32,6 +32,8 @@
%fj 5:5
%fk 10:5
%fa 15:5
+%cd 0:3
+%fcond 15:5
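+# cd selects one of the eight condition flags (cf[0]-cf[7]);
+# fcond selects the comparison predicate.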
#
# Argument sets
@@ -57,6 +59,7 @@
&fmt_fdfjfk fd fj fk
&fmt_fdfjfkfa fd fj fk fa
&fmt_fdfj fd fj
+&fmt_cdfjfkfcond cd fj fk fcond
#
# Formats
@@ -82,6 +85,7 @@
@fmt_fdfjfk .... ........ ..... ..... ..... ..... &fmt_fdfjfk %fd %fj %fk
@fmt_fdfjfkfa .... ........ ..... ..... ..... ..... &fmt_fdfjfkfa %fd %fj %fk %fa
@fmt_fdfj .... ........ ..... ..... ..... ..... &fmt_fdfj %fd %fj
+@fmt_cdfjfkfcond .... ........ ..... ..... ..... .. ... &fmt_cdfjfkfcond %cd %fj %fk %fcond
#
# Fixed point arithmetic operation instruction
@@ -344,3 +348,9 @@ fcopysign_s 0000 00010001 00101 ..... ..... ..... @fmt_fdfjfk
fcopysign_d 0000 00010001 00110 ..... ..... ..... @fmt_fdfjfk
fclass_s 0000 00010001 01000 01101 ..... ..... @fmt_fdfj
fclass_d 0000 00010001 01000 01110 ..... ..... @fmt_fdfj
+
+#
+# Floating point compare instruction
+#
+fcmp_cond_s 0000 11000001 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
+fcmp_cond_d 0000 11000010 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
@@ -1871,3 +1871,18 @@ static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
gen_loongarch_fp_arith(ctx, LA_OPC_FCLASS_D, 0, a->fj, a->fd);
return true;
}
+
+/* Floating point compare instruction translation */
+static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
+{
+ check_fpu_enabled(ctx);
+ gen_loongarch_fp_cmp_s(ctx, a->fcond, a->fk, a->fj, a->cd);
+ return true;
+}
+
+static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
+{
+ check_fpu_enabled(ctx);
+ gen_loongarch_fp_cmp_d(ctx, a->fcond, a->fk, a->fj, a->cd);
+ return true;
+}
@@ -1679,6 +1679,96 @@ static void gen_loongarch_fp_arith(DisasContext *ctx, uint32_t opc,
}
}
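+/*
+ * FCMP.cond.{S,D}: load fj and fk, call the helper selected by fcond,
+ * then store bit 0 of the helper's result into condition flag cd via
+ * the movreg2cf helper (the STORE argument of this macro).
+ */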
+#define FP_CMP(fmt, bits, STORE) \
+static inline void gen_loongarch_fp_cmp_ ## fmt(DisasContext *ctx, int fcond, \
+ int fk, int fj, int cd) \
+{ \
+ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(); \
+ TCGv_i32 fcc = tcg_const_i32(cd); \
+ check_fpu_enabled(ctx); \
+ gen_load_fpr ## bits(ctx, fp0, fj); \
+ gen_load_fpr ## bits(ctx, fp1, fk); \
+ switch (fcond) { \
+ case 0: \
+ gen_helper_fp_cmp_caf_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 1: \
+ gen_helper_fp_cmp_saf_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 2: \
+ gen_helper_fp_cmp_clt_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 3: \
+ gen_helper_fp_cmp_slt_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 4: \
+ gen_helper_fp_cmp_ceq_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 5: \
+ gen_helper_fp_cmp_seq_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 6: \
+ gen_helper_fp_cmp_cle_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 7: \
+ gen_helper_fp_cmp_sle_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 8: \
+ gen_helper_fp_cmp_cun_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 9: \
+ gen_helper_fp_cmp_sun_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 10: \
+ gen_helper_fp_cmp_cult_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 11: \
+ gen_helper_fp_cmp_sult_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 12: \
+ gen_helper_fp_cmp_cueq_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 13: \
+ gen_helper_fp_cmp_sueq_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 14: \
+ gen_helper_fp_cmp_cule_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 15: \
+ gen_helper_fp_cmp_sule_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 16: \
+ gen_helper_fp_cmp_cne_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 17: \
+ gen_helper_fp_cmp_sne_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 20: \
+ gen_helper_fp_cmp_cor_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 21: \
+ gen_helper_fp_cmp_sor_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 24: \
+ gen_helper_fp_cmp_cune_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 25: \
+ gen_helper_fp_cmp_sune_ ## fmt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ STORE; \
+ tcg_temp_free_i ## bits(fp0); \
+ tcg_temp_free_i ## bits(fp1); \
+ tcg_temp_free_i32(fcc); \
+}
+
+FP_CMP(d, 64, gen_helper_movreg2cf_i64(cpu_env, fcc, fp0))
+FP_CMP(s, 32, gen_helper_movreg2cf_i32(cpu_env, fcc, fp0))
+#undef FP_CMP
+
static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
This patch implements floating point comparison instruction translation.
This includes:
- FCMP.cond.{S/D}

Signed-off-by: Song Gao <gaosong@loongson.cn>
---
 target/loongarch/fpu_helper.c | 613 ++++++++++++++++++++++++++++++++++++++++++
 target/loongarch/helper.h     |  49 ++++
 target/loongarch/insns.decode |  10 +
 target/loongarch/trans.inc.c  |  15 ++
 target/loongarch/translate.c  |  90 +++++++
 5 files changed, 777 insertions(+)