@@ -2643,6 +2643,8 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
#define VsrSW(i) s32[i]
#define VsrD(i) u64[i]
#define VsrSD(i) s64[i]
+#define VsrSF(i) f32[i]
+#define VsrDF(i) f64[i]
#else
#define VsrB(i) u8[15 - (i)]
#define VsrSB(i) s8[15 - (i)]
@@ -2652,6 +2654,8 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
#define VsrSW(i) s32[3 - (i)]
#define VsrD(i) u64[1 - (i)]
#define VsrSD(i) s64[1 - (i)]
+#define VsrSF(i) f32[3 - (i)]
+#define VsrDF(i) f64[1 - (i)]
#endif
static inline int vsr64_offset(int i, bool high)
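For orientation, a minimal standalone sketch (hypothetical names, host-compiler endianness test, not part of the patch) of what the big/little-endian flip in these macros achieves: architectural element 0 always denotes the same guest-visible lane, whichever array slot the host keeps it in.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for ppc_vsr_t: a 16-byte vector overlay. */
union demo_vsr {
    uint8_t  u8[16];
    uint32_t u32[4];
};

/* Same flip as VsrW()/VsrSF(): index straight through on a big-endian
 * host, mirrored on a little-endian one. */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define DemoW(i) u32[i]
#else
#define DemoW(i) u32[3 - (i)]
#endif

int main(void)
{
    union demo_vsr v = { .u8 = { 0 } };
    v.DemoW(0) = 1; /* write architectural word 0 */
    for (int i = 0; i < 4; i++) {
        printf("u32[%d] = %u\n", i, v.u32[i]); /* slot 3 on LE, 0 on BE */
    }
    return 0;
}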
@@ -414,7 +414,7 @@ void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
ppc_store_fpscr(env, val);
}
-void helper_fpscr_check_status(CPUPPCState *env)
+static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr)
{
CPUState *cs = env_cpu(env);
target_ulong fpscr = env->fpscr;
@@ -455,13 +455,19 @@ void helper_fpscr_check_status(CPUPPCState *env)
}
cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = error | POWERPC_EXCP_FP;
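+    /* error != 0 means an enabled exception is pending: note it in FEX */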
+ env->fpscr |= error ? FP_FEX : 0;
/* Deferred floating-point exception after target FPSCR update */
if (fp_exceptions_enabled(env)) {
raise_exception_err_ra(env, cs->exception_index,
- env->error_code, GETPC());
+ env->error_code, raddr);
}
}
+void helper_fpscr_check_status(CPUPPCState *env)
+{
+ do_fpscr_check_status(env, GETPC());
+}
+
static void do_float_check_status(CPUPPCState *env, bool change_fi,
uintptr_t raddr)
{
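The split follows the usual QEMU helper idiom: GETPC() only yields a useful return address in the function entered directly from TCG-generated code, so the public helper captures it once and the static worker takes it as an explicit parameter, letting other C code (vsxger_excp() below) reuse the same logic. A minimal sketch of the shape, with a hypothetical predicate:

static void do_check(CPUPPCState *env, uintptr_t raddr)
{
    if (some_pending_condition(env)) { /* hypothetical predicate */
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_FP, raddr);
    }
}

void helper_check(CPUPPCState *env)
{
    /* GETPC() must be evaluated at the TCG boundary, not deeper down */
    do_check(env, GETPC());
}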
@@ -3468,3 +3474,187 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
*xt = t;
do_float_check_status(env, true, GETPC());
}
+
+static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr)
+{
+    /*
+     * XV*GER instructions execute as if the FPSCR exception enables
+     * were all clear, accumulating status along the way, and raise
+     * any enabled exception only once, at the end.
+     */
+ target_ulong enable;
+ enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR);
+ env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR);
+ int status = get_float_exception_flags(&env->fp_status);
+ if (unlikely(status & float_flag_invalid)) {
+ if (status & float_flag_invalid_snan) {
+ float_invalid_op_vxsnan(env, 0);
+ }
+ if (status & float_flag_invalid_imz) {
+ float_invalid_op_vximz(env, false, 0);
+ }
+ if (status & float_flag_invalid_isi) {
+ float_invalid_op_vxisi(env, false, 0);
+ }
+ }
+ do_float_check_status(env, false, retaddr);
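+    /* Restore the saved enables, then take any deferred trap */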
+ env->fpscr |= enable;
+ do_fpscr_check_status(env, retaddr);
+}
+
+typedef void vsxger_zero(ppc_vsr_t *at, int i, int j);
+
+typedef void vsxger_muladd_f(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b,
+                             int i, int j, int flags, float_status *s);
+
+static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
+ at[i].VsrSF(j), flags, s);
+}
+
+static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s);
+}
+
+static void vsxger_zero32(ppc_vsr_t *at, int i, int j)
+{
+ at[i].VsrSF(j) = float32_zero;
+}
+
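+/*
+ * A 64-bit accumulator row holds only two doubles, so of the shared
+ * 4x4 loop only j = 2 and j = 3 name real columns: j - 2 recovers the
+ * column index and the j < 2 iterations fall through as no-ops.  Row i
+ * is sourced from double i % 2 of register i / 2 of the XA pair.
+ */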
+static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ if (j >= 2) {
+ j -= 2;
+ at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
+ at[i].VsrDF(j), flags, s);
+ }
+}
+
+static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ if (j >= 2) {
+ j -= 2;
+ at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
+ }
+}
+
+static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
+{
+ if (j >= 2) {
+ j -= 2;
+ at[i].VsrDF(j) = float64_zero;
+ }
+}
+
+static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask, bool acc, bool neg_mul,
+ bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd,
+ vsxger_zero zero)
+{
+ int i, j, xmsk_bit, ymsk_bit, op_flags;
+ uint8_t xmsk = mask & 0x0F;
+ uint8_t ymsk = (mask >> 4) & 0x0F;
+ float_status *excp_ptr = &env->fp_status;
+ op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
+ op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
+ helper_reset_fpstatus(env);
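+    /*
+     * Walk the 4x4 accumulator: row i is live when bit (3 - i) of xmsk
+     * is set, column j when bit (3 - j) of ymsk is set; every
+     * masked-out element is zeroed rather than left untouched.
+     */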
+ for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
+ for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
+ if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
+ if (acc) {
+ muladd(at, a, b, i, j, op_flags, excp_ptr);
+ } else {
+ mul(at, a, b, i, j, op_flags, excp_ptr);
+ }
+ } else {
+ zero(at, i, j);
+ }
+ }
+ }
+ vsxger_excp(env, GETPC());
+}
+
+QEMU_FLATTEN
+void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
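Stripped of masking and FPSCR bookkeeping, each XVF32GER* performs a rank-1 (outer product) update of the 4x4 accumulator; the PP/PN/NP/NN suffixes pick the operand signs, which vsxger() maps onto the float_muladd_negate_c and float_muladd_negate_result flags. A plain-C sketch of the unmasked PP case, using host floats rather than softfloat, so rounding and exception behaviour differ:

#include <stdio.h>

/* at[i][j] += a[i] * b[j]: what XVF32GERPP computes per unmasked lane */
static void f32ger_pp(float at[4][4], const float a[4], const float b[4])
{
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            at[i][j] += a[i] * b[j];
        }
    }
}

int main(void)
{
    float at[4][4] = { { 0 } };
    float a[4] = { 1, 2, 3, 4 }, b[4] = { 10, 20, 30, 40 };
    f32ger_pp(at, a, b);
    printf("at[1][2] = %g\n", at[1][2]); /* 2 * 30 = 60 */
    return 0;
}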
@@ -550,6 +550,16 @@ DEF_HELPER_5(XVI16GER2, void, env, vsr, vsr, acc, i32)
DEF_HELPER_5(XVI16GER2S, void, env, vsr, vsr, acc, i32)
DEF_HELPER_5(XVI16GER2PP, void, env, vsr, vsr, acc, i32)
DEF_HELPER_5(XVI16GER2SPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GER, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERPN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERNP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERNN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GER, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERPN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERNP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERNN, void, env, vsr, vsr, acc, i32)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
@@ -178,6 +178,7 @@
# 32 bit GER instructions have all mask bits considered 1
&MMIRR_XX3 xa xb xt pmsk xmsk ymsk
%xx_at 23:3
+%xx_xa_pair 2:1 17:4 !function=times_2
@XX3_at ...... ... .. ..... ..... ........ ... &MMIRR_XX3 xt=%xx_at xb=%xx_xb \
pmsk=255 xmsk=15 ymsk=15
@@ -761,3 +762,15 @@ XVI16GER2PP 111011 ... -- ..... ..... 01101011 ..- @XX3_at xa=%xx_xa
XVI8GER4SPP 111011 ... -- ..... ..... 01100011 ..- @XX3_at xa=%xx_xa
XVI16GER2S 111011 ... -- ..... ..... 00101011 ..- @XX3_at xa=%xx_xa
XVI16GER2SPP 111011 ... -- ..... ..... 00101010 ..- @XX3_at xa=%xx_xa
+
+XVF32GER 111011 ... -- ..... ..... 00011011 ..- @XX3_at xa=%xx_xa
+XVF32GERPP 111011 ... -- ..... ..... 00011010 ..- @XX3_at xa=%xx_xa
+XVF32GERPN 111011 ... -- ..... ..... 10011010 ..- @XX3_at xa=%xx_xa
+XVF32GERNP 111011 ... -- ..... ..... 01011010 ..- @XX3_at xa=%xx_xa
+XVF32GERNN 111011 ... -- ..... ..... 11011010 ..- @XX3_at xa=%xx_xa
+
+XVF64GER 111011 ... -- .... 0 ..... 00111011 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERPP 111011 ... -- .... 0 ..... 00111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERPN 111011 ... -- .... 0 ..... 10111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERNP 111011 ... -- .... 0 ..... 01111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERNN 111011 ... -- .... 0 ..... 11111010 ..- @XX3_at xa=%xx_xa_pair
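%xx_xa_pair concatenates the AX bit (position 2, listed first, so most significant per decodetree) with the top four bits of the A field and doubles the result through the times_2 !function (presumably the same helper the existing paired-operand decodes use), so XA always resolves to the even, low-numbered VSR of the source pair; the literal 0 in the patterns rejects odd encodings outright. Roughly what the generated extractor does (function name hypothetical):

static int extract_xx_xa_pair(uint32_t insn)
{
    /* 2:1 is the MSB group, 17:4 the low four bits */
    int xa = (((insn >> 2) & 0x1) << 4) | ((insn >> 17) & 0xF);
    return xa * 2; /* times_2: name the even register of the pair */
}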
@@ -2898,6 +2898,18 @@ TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S)
TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)
+TRANS(XVF32GER, do_ger, gen_helper_XVF32GER)
+TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP)
+TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN)
+TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP)
+TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN)
+
+TRANS(XVF64GER, do_ger, gen_helper_XVF64GER)
+TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP)
+TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN)
+TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP)
+TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN)
+
#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM