@@ -319,6 +319,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
+static inline bool kvm_vcpu_trap_is_dabt(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW;
+}
+
static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
@@ -278,6 +278,36 @@ struct vcpu_reset_state {
bool reset;
};
+enum arm_exit_reason {
+ ARM_EXIT_UNKNOWN,
+ ARM_EXIT_IRQ,
+ ARM_EXIT_EL1_SERROR,
+ ARM_EXIT_HYP_GONE,
+ ARM_EXIT_IL,
+ ARM_EXIT_WFI,
+ ARM_EXIT_WFE,
+ ARM_EXIT_CP15_32,
+ ARM_EXIT_CP15_64,
+ ARM_EXIT_CP14_32,
+ ARM_EXIT_CP14_LS,
+ ARM_EXIT_CP14_64,
+ ARM_EXIT_HVC32,
+ ARM_EXIT_SMC32,
+ ARM_EXIT_HVC64,
+ ARM_EXIT_SMC64,
+ ARM_EXIT_SYS64,
+ ARM_EXIT_SVE,
+ ARM_EXIT_IABT_LOW,
+ ARM_EXIT_DABT_LOW,
+ ARM_EXIT_SOFTSTP_LOW,
+ ARM_EXIT_WATCHPT_LOW,
+ ARM_EXIT_BREAKPT_LOW,
+ ARM_EXIT_BKPT32,
+ ARM_EXIT_BRK64,
+ ARM_EXIT_FP_ASIMD,
+ ARM_EXIT_PAC,
+};
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
void *sve_state;
@@ -384,6 +414,9 @@ struct kvm_vcpu_arch {
u64 last_steal;
gpa_t base;
} steal;
+
+ /* Arch specific exit reason */
+ enum arm_exit_reason exit_reason;
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -49,6 +49,18 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
return ret;
}
+static int handle_hvc32(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.exit_reason = ARM_EXIT_HVC32;
+ return handle_hvc(vcpu);
+}
+
+static int handle_hvc64(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.exit_reason = ARM_EXIT_HVC64;
+ return handle_hvc(vcpu);
+}
+
static int handle_smc(struct kvm_vcpu *vcpu)
{
/*
@@ -64,12 +76,25 @@ static int handle_smc(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_smc32(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.exit_reason = ARM_EXIT_SMC32;
+ return handle_smc(vcpu);
+}
+
+static int handle_smc64(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.exit_reason = ARM_EXIT_SMC64;
+ return handle_smc(vcpu);
+}
+
/*
* Guest access to FP/ASIMD registers are routed to this handler only
* when the system doesn't support FP/ASIMD.
*/
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_FP_ASIMD;
kvm_inject_undefined(vcpu);
return 1;
}
@@ -91,10 +116,12 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
vcpu->stat.wfe_exit_stat++;
+ vcpu->arch.exit_reason = ARM_EXIT_WFE;
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
+ vcpu->arch.exit_reason = ARM_EXIT_WFI;
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
@@ -119,12 +146,29 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 esr = kvm_vcpu_get_esr(vcpu);
+ u8 esr_ec = ESR_ELx_EC(esr);
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.hsr = esr;
- if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
+ switch (esr_ec) {
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ vcpu->arch.exit_reason = ARM_EXIT_SOFTSTP_LOW;
+ break;
+ case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
+ vcpu->arch.exit_reason = ARM_EXIT_WATCHPT_LOW;
+ break;
+ case ESR_ELx_EC_BREAKPT_LOW:
+ vcpu->arch.exit_reason = ARM_EXIT_BREAKPT_LOW;
+ break;
+ case ESR_ELx_EC_BKPT32:
+ vcpu->arch.exit_reason = ARM_EXIT_BKPT32;
+ break;
+ case ESR_ELx_EC_BRK64:
+ vcpu->arch.exit_reason = ARM_EXIT_BRK64;
+ break;
+ }
return 0;
}
@@ -136,12 +180,14 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
esr, esr_get_class_string(esr));
+ vcpu->arch.exit_reason = ARM_EXIT_UNKNOWN;
kvm_inject_undefined(vcpu);
return 1;
}
static int handle_sve(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_SVE;
/* Until SVE is supported for guests: */
kvm_inject_undefined(vcpu);
return 1;
@@ -154,6 +200,7 @@ static int handle_sve(struct kvm_vcpu *vcpu)
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_PAC;
kvm_inject_undefined(vcpu);
return 1;
}
@@ -166,10 +213,10 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32,
[ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
[ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
- [ESR_ELx_EC_HVC32] = handle_hvc,
- [ESR_ELx_EC_SMC32] = handle_smc,
- [ESR_ELx_EC_HVC64] = handle_hvc,
- [ESR_ELx_EC_SMC64] = handle_smc,
+ [ESR_ELx_EC_HVC32] = handle_hvc32,
+ [ESR_ELx_EC_SMC32] = handle_smc32,
+ [ESR_ELx_EC_HVC64] = handle_hvc64,
+ [ESR_ELx_EC_SMC64] = handle_smc64,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_SVE] = handle_sve,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
@@ -230,8 +277,10 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
+ vcpu->arch.exit_reason = ARM_EXIT_IRQ;
return 1;
case ARM_EXCEPTION_EL1_SERROR:
+ vcpu->arch.exit_reason = ARM_EXIT_EL1_SERROR;
return 1;
case ARM_EXCEPTION_TRAP:
return handle_trap_exceptions(vcpu);
@@ -240,6 +289,7 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
* EL2 has been reset to the hyp-stub. This happens when a guest
* is pre-empted by kvm_reboot()'s shutdown call.
*/
+ vcpu->arch.exit_reason = ARM_EXIT_HYP_GONE;
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
case ARM_EXCEPTION_IL:
@@ -247,11 +297,13 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
* We attempted an illegal exception return. Guest state must
* have been corrupted somehow. Give up.
*/
+ vcpu->arch.exit_reason = ARM_EXIT_IL;
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return -EINVAL;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
+ vcpu->arch.exit_reason = ARM_EXIT_UNKNOWN;
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return 0;
}
@@ -1197,6 +1197,10 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ if (is_iabt)
+ vcpu->arch.exit_reason = ARM_EXIT_IABT_LOW;
+ else if (kvm_vcpu_trap_is_dabt(vcpu))
+ vcpu->arch.exit_reason = ARM_EXIT_DABT_LOW;
/* Synchronous External Abort? */
if (kvm_vcpu_abt_issea(vcpu)) {
@@ -2158,6 +2158,7 @@ static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_CP14_LS;
kvm_inject_undefined(vcpu);
return 1;
}
@@ -2325,21 +2326,25 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_CP15_64;
return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_CP15_32;
return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
}
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_CP14_64;
return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exit_reason = ARM_EXIT_CP14_32;
return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
}
@@ -2397,6 +2402,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
int ret;
trace_kvm_handle_sys_reg(esr);
+ vcpu->arch.exit_reason = ARM_EXIT_SYS64;
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
Arch specific exit reasons have been available for other architectures.
Add arch specific exit reason support for ARM64, which will be used in
KVM stats for monitoring VCPU status.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
 arch/arm64/include/asm/kvm_emulate.h |  5 +++
 arch/arm64/include/asm/kvm_host.h    | 33 +++++++++++++++
 arch/arm64/kvm/handle_exit.c         | 62 +++++++++++++++++++++++++---
 arch/arm64/kvm/mmu.c                 |  4 ++
 arch/arm64/kvm/sys_regs.c            |  6 +++
 5 files changed, 105 insertions(+), 5 deletions(-)