@@ -78,7 +78,8 @@ static bool arm_cpu_has_work(CPUState *cs)
&& cs->interrupt_request &
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
- | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB);
+ | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_VSERROR
+ | CPU_INTERRUPT_EXITTB);
}
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
@@ -452,6 +453,12 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
case EXCP_SERROR:
pstate_unmasked = !(env->daif & PSTATE_A);
break;
+ case EXCP_VSERROR:
+ if (secure || !(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
+ /* VSErrors are only taken when hypervized and non-secure. */
+ return false;
+ }
+ return !(env->daif & PSTATE_A);
default:
g_assert_not_reached();
}
@@ -550,6 +557,15 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
}
+ if (interrupt_request & CPU_INTERRUPT_VSERROR) {
+ excp_idx = EXCP_VSERROR;
+ target_el = 1;
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+
if (interrupt_request & CPU_INTERRUPT_FIQ) {
excp_idx = EXCP_FIQ;
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -558,6 +574,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
goto found;
}
}
+
if (interrupt_request & CPU_INTERRUPT_HARD) {
excp_idx = EXCP_IRQ;
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -566,6 +583,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
goto found;
}
}
+
if (interrupt_request & CPU_INTERRUPT_VIRQ) {
excp_idx = EXCP_VIRQ;
target_el = 1;
@@ -574,6 +592,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
goto found;
}
}
+
if (interrupt_request & CPU_INTERRUPT_VFIQ) {
excp_idx = EXCP_VFIQ;
target_el = 1;
@@ -672,6 +691,27 @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
}
}
+void arm_cpu_update_vserror(ARMCPU *cpu)
+{
+ /*
+ * Update the interrupt level for virtual SError, which is the logical
+ * OR of the HCR_EL2.VSE bit and the input line level from the GIC.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ bool new_state = (env->cp15.hcr_el2 & HCR_VSE) ||
+ (env->irq_line_state & CPU_INTERRUPT_VSERROR);
+
+ if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERROR) != 0)) {
+ if (new_state) {
+ cpu_interrupt(cs, CPU_INTERRUPT_VSERROR);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERROR);
+ }
+ }
+}
+
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
@@ -684,6 +724,7 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
[ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
[ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
[ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
+ [ARM_CPU_VSERROR] = CPU_INTERRUPT_VSERROR,
};
if (level) {
@@ -710,6 +751,10 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
cpu_reset_interrupt(cs, mask[irq]);
}
break;
+ case ARM_CPU_VSERROR:
+ assert(arm_feature(env, ARM_FEATURE_EL2));
+ arm_cpu_update_vserror(cpu);
+ break;
default:
g_assert_not_reached();
}
@@ -50,6 +50,7 @@
#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
#define EXCP_SERROR 23 /* SError Interrupt */
+#define EXCP_VSERROR 24 /* Virtual SError Interrupt */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -80,10 +81,11 @@ enum {
};
/* ARM-specific interrupt pending bits. */
-#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
-#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
-#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
-#define CPU_INTERRUPT_SERROR CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_SERROR CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_VSERROR CPU_INTERRUPT_TGT_EXT_4
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
@@ -105,7 +107,8 @@ enum {
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3
#define ARM_CPU_SERROR 4
-#define ARM_CPU_NUM_IRQ 5
+#define ARM_CPU_VSERROR 5
+#define ARM_CPU_NUM_IRQ 6
/* ARM-specific extra insn start words:
* 1: Conditional execution bits
@@ -1969,7 +1969,11 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
- if (!allow_virt || !(hcr_el2 & HCR_AMO)) {
+ if (allow_virt && (hcr_el2 & HCR_AMO)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VSERROR) {
+ ret |= CPSR_A;
+ }
+ } else {
if (cs->interrupt_request & CPU_INTERRUPT_SERROR) {
ret |= CPSR_A;
}
@@ -5103,6 +5107,7 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
g_assert(qemu_mutex_iothread_locked());
arm_cpu_update_virq(cpu);
arm_cpu_update_vfiq(cpu);
+ arm_cpu_update_vserror(cpu);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -8605,6 +8610,7 @@ void arm_log_exception(int idx)
[EXCP_LSERR] = "v8M LSERR UsageFault",
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
[EXCP_SERROR] = "SError Interrupt",
+ [EXCP_VSERROR] = "Virtual SError Interrupt",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -9113,6 +9119,17 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 0;
break;
+ case EXCP_VSERROR:
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
+ qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
+ env->exception.fsr,
+ (uint32_t)env->exception.vaddress);
+ new_mode = ARM_CPU_MODE_ABT;
+ addr = 0x10;
+ mask = CPSR_A | CPSR_I;
+ offset = 8;
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
@@ -9223,6 +9240,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
addr += 0x100;
break;
case EXCP_SERROR:
+ case EXCP_VSERROR:
addr += 0x180;
break;
default:
@@ -1023,6 +1023,15 @@ void arm_cpu_update_virq(ARMCPU *cpu);
*/
void arm_cpu_update_vfiq(ARMCPU *cpu);
+/**
+ * arm_cpu_update_vserror: Update CPU_INTERRUPT_VSERROR interrupt
+ *
+ * Update the CPU_INTERRUPT_VSERROR bit in cs->interrupt_request, following
+ * a change to either the input virtual SError line from the GIC or the
+ * HCR_EL2.VSE bit. Must be called with the iothread lock held.
+ */
+void arm_cpu_update_vserror(ARMCPU *cpu);
+
/**
* arm_mmu_idx_el:
* @env: The cpu environment
@@ -715,7 +715,7 @@ static int cpu_post_load(void *opaque, int version_id)
env->irq_line_state = cs->interrupt_request &
(CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ |
- CPU_INTERRUPT_SERROR);
+ CPU_INTERRUPT_SERROR | CPU_INTERRUPT_VSERROR);
}
/* Update the values list from the incoming migration data.
This supports virtual SError injection, which can be used to inject SError to guest running on the emulated hypervisor. The functionality is enabled only when we're in non-secured mode and {HCR.TGE, HCR.AMO} are set to {0, 1}. Also, it can be masked by PState.A bit. Apart from that, the implementation is similar to VFIQ. Signed-off-by: Gavin Shan <gshan@redhat.com> --- target/arm/cpu.c | 48 +++++++++++++++++++++++++++++++++++++++++- target/arm/cpu.h | 13 +++++++----- target/arm/helper.c | 20 +++++++++++++++++- target/arm/internals.h | 10 +++++++++ target/arm/machine.c | 2 +- 5 files changed, 85 insertions(+), 8 deletions(-)