diff --git a/target/arm/cpu.h b/target/arm/cpu.h
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -645,6 +645,11 @@ typedef struct CPUARMState {
const struct arm_boot_info *boot_info;
/* Store GICv3CPUState to access from this struct */
void *gicv3state;
+ struct {
+ uint32_t pending;
+ uint32_t has_esr;
+ uint64_t esr;
+ } serror;
} CPUARMState;
/**
@@ -1486,6 +1491,7 @@ enum arm_features {
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
+ ARM_FEATURE_RAS_EXT, /* has RAS Extension */
};
static inline int arm_feature(CPUARMState *env, int feature)
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -527,6 +527,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
unset_feature(&env->features, ARM_FEATURE_PMU);
}
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_INJECT_SERROR_ESR)) {
+ set_feature(&env->features, ARM_FEATURE_RAS_EXT);
+ }
+
/* Do KVM_ARM_VCPU_INIT ioctl */
ret = kvm_arm_vcpu_init(cs);
if (ret) {
@@ -600,6 +604,50 @@ int kvm_arm_cpreg_level(uint64_t regidx)
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+static int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ struct kvm_vcpu_events events;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ events.exception.serror_pending = env->serror.pending;
+
+ if (arm_feature(env, ARM_FEATURE_RAS_EXT)) {
+ events.exception.serror_has_esr = env->serror.has_esr;
+ events.exception.serror_esr = env->serror.esr;
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+}
+
+static int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ env->serror.pending = events.exception.serror_pending;
+ env->serror.has_esr = events.exception.serror_has_esr;
+ env->serror.esr = events.exception.serror_esr;
+
+ return 0;
+}
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
struct kvm_one_reg reg;
@@ -727,6 +775,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ error_report("failed to put vcpu events: %d", ret);
+ return ret;
+ }
+
if (!write_list_to_kvmstate(cpu, level)) {
return EINVAL;
}
@@ -863,6 +917,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpcr(env, fpr);
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
diff --git a/target/arm/machine.c b/target/arm/machine.c
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -172,6 +172,27 @@ static const VMStateDescription vmstate_sve = {
};
#endif /* AARCH64 */
+static bool ras_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_RAS_EXT);
+}
+
+static const VMStateDescription vmstate_ras = {
+ .name = "cpu/ras",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ras_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.serror.pending, ARMCPU),
+ VMSTATE_UINT32(env.serror.has_esr, ARMCPU),
+ VMSTATE_UINT64(env.serror.esr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool m_needed(void *opaque)
{
ARMCPU *cpu = opaque;
@@ -723,6 +744,7 @@ const VMStateDescription vmstate_arm_cpu = {
#ifdef TARGET_AARCH64
&vmstate_sve,
#endif
+ &vmstate_ras,
NULL
}
};
This patch extends the qemu-kvm state sync logic with support for
KVM_GET/SET_VCPU_EVENTS, giving access to the previously missing SError
exception state. It also allows that exception state to be migrated.

Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
---
Change since v4:
1. Rebase the code to the latest.

Change since v3:
1. Add a new subsection with a suitable 'ras_needed' function
   controlling whether it is present.
2. Add an ARM_FEATURE_RAS_EXT feature bit for CPUARMState.
---
 target/arm/cpu.h     |  6 ++++++
 target/arm/kvm64.c   | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 target/arm/machine.c | 22 ++++++++++++++++++++
 3 files changed, 87 insertions(+)
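For reviewers who want to see the kernel interface in isolation, below is a
minimal userspace sketch (not part of this patch) of the ioctl that
kvm_put_vcpu_events() wraps. inject_serror() and vcpu_fd are hypothetical
names; the field layout is the arm64 struct kvm_vcpu_events from
<linux/kvm.h>, and the host must advertise KVM_CAP_ARM_INJECT_SERROR_ESR
for the supplied ESR to take effect.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Pend a virtual SError with a caller-supplied syndrome on the vCPU
     * behind vcpu_fd -- the same ioctl and fields that this patch drives
     * from kvm_put_vcpu_events().
     */
    static int inject_serror(int vcpu_fd, uint64_t esr)
    {
        struct kvm_vcpu_events events;

        memset(&events, 0, sizeof(events));
        events.exception.serror_pending = 1;
        events.exception.serror_has_esr = 1;
        events.exception.serror_esr = esr;

        return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
    }

On migration, the same three values travel in the new "cpu/ras" subsection,
so the destination re-injects any pending SError when
kvm_arch_put_registers() runs.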