@@ -645,6 +645,11 @@ typedef struct CPUARMState {
const struct arm_boot_info *boot_info;
/* Store GICv3CPUState to access from this struct */
void *gicv3state;
+ struct {
+ uint32_t pending; /* nonzero: an SError is pending injection/delivery */
+ uint32_t has_esr; /* nonzero: 'esr' below holds a valid syndrome value */
+ uint64_t esr; /* SError exception syndrome (only sent to KVM when the
+ * host advertises KVM_CAP_ARM_INJECT_SERROR_ESR) */
+ } serror;
} CPUARMState;
/**
@@ -600,6 +600,59 @@ int kvm_arm_cpreg_level(uint64_t regidx)
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+static bool kvm_can_set_vcpu_esr(ARMCPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+
+    /* Positive value means the host kernel can inject an SError with ESR. */
+    return kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_INJECT_SERROR_ESR) > 0;
+}
+
+
+static int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events = {};
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    events.exception.serror_pending = env->serror.pending;
+
+    /* Only pass a syndrome when the host kernel can accept one. */
+    if (kvm_can_set_vcpu_esr(cpu)) {
+        events.exception.serror_has_esr = env->serror.has_esr;
+        events.exception.serror_esr = env->serror.esr;
+    }
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+}
+
+/* Sync pending SError state from KVM into env->serror. */
+static int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events = {};
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+    if (ret < 0) {
+        return ret;
+    }
+
+    /* Mirror the kernel's view of the SError into the CPU state. */
+    env->serror.pending = events.exception.serror_pending;
+    env->serror.has_esr = events.exception.serror_has_esr;
+    env->serror.esr = events.exception.serror_esr;
+
+    return 0;
+}
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
struct kvm_one_reg reg;
@@ -727,6 +780,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+    ret = kvm_put_vcpu_events(cpu);
+    if (ret) {
+        /* QEMU convention: report errors via error_report, not printf. */
+        error_report("failed to put vcpu events: %d", ret);
+        return ret;
+    }
if (!write_list_to_kvmstate(cpu, level)) {
return EINVAL;
}
@@ -863,6 +922,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpcr(env, fpr);
+ ret = kvm_get_vcpu_events(cpu); /* sync SError state out of the kernel */
+ if (ret) {
+ return ret;
+ }
+
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
@@ -695,6 +695,9 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
VMSTATE_UINT32(env.exception.fsr, ARMCPU),
VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
+ VMSTATE_UINT32(env.serror.pending, ARMCPU), /* NOTE(review): adding fields to the top-level vmstate breaks migration from/to older QEMU; consider a versioned subsection with a .needed function — confirm */
+ VMSTATE_UINT32(env.serror.has_esr, ARMCPU),
+ VMSTATE_UINT64(env.serror.esr, ARMCPU),
VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
{
This patch extends the qemu-kvm state sync logic with support for the KVM_GET/SET_VCPU_EVENTS ioctls, giving access to the previously missing SError exception state. It also enables migration of that exception state. Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com> --- target/arm/cpu.h | 5 ++++ target/arm/kvm64.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++ target/arm/machine.c | 3 +++ 3 files changed, 72 insertions(+)