@@ -1867,6 +1867,40 @@ static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
     qemu_system_guest_panicked(NULL);
 }
+/* try to detect pgm check loops */
+static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
+{
+    CPUState *cs = CPU(cpu);
+    PSW oldpsw, newpsw;
+
+    cpu_synchronize_state(cs);
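+    /* fetch the pgm new PSW from the guest lowcore (via the prefix) */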
+    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
+                           offsetof(LowCore, program_new_psw));
+    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
+                           offsetof(LowCore, program_new_psw) + 8);
+    oldpsw.mask = run->psw_mask;
+    oldpsw.addr = run->psw_addr;
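+    /* the old PSW is the guest PSW at the time of the operation exception */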
+    /*
+     * Avoid endless loops of operation exceptions if the pgm new
+     * PSW would itself cause a new operation exception.
+     * The heuristic checks whether the pgm new PSW points at most
+     * 6 bytes (the maximum instruction length) before the faulting
+     * PSW address with the same DAT and AS settings, whether the
+     * new PSW is not a wait PSW, and whether the fault was not
+     * triggered from problem state. If all of that holds, go into
+     * crashed state.
+     */
+
+    if (oldpsw.addr - newpsw.addr <= 6 &&
+        !(newpsw.mask & PSW_MASK_WAIT) &&
+        !(oldpsw.mask & PSW_MASK_PSTATE) &&
+        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
+        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
+        unmanageable_intercept(cpu, "operation exception loop",
+                               offsetof(LowCore, program_new_psw));
+        return EXCP_HALTED;
+    }
+    return 0;
+}
+
 static int handle_intercept(S390CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
@@ -1914,11 +1948,14 @@ static int handle_intercept(S390CPU *cpu)
         r = EXCP_HALTED;
         break;
     case ICPT_OPEREXC:
-        /* currently only instr 0x0000 after enabled via capability */
+        /* check for software breakpoints */
         r = handle_sw_breakpoint(cpu, run);
         if (r == -ENOENT) {
-            enter_pgmcheck(cpu, PGM_OPERATION);
-            r = 0;
+            /* Then check for potential pgm check loops */
+            r = handle_oper_loop(cpu, run);
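+            /* no loop found: inject the operation exception into the guest */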
+            if (r == 0) {
+                enter_pgmcheck(cpu, PGM_OPERATION);
+            }
         }
         break;
     case ICPT_SOFT_INTERCEPT: