@@ -25,6 +25,9 @@ struct kvm_shadow_vcpu_state {
	/* A pointer to the shadow vm. */
	struct kvm_shadow_vm *shadow_vm;

+	/* Tracks the exit code (ARM_EXCEPTION_*) of the protected guest's last run. */
+	u32 exit_code;
+
	/*
	 * Points to the per-cpu pointer of the cpu where it's loaded, or NULL
	 * if not loaded.
@@ -24,6 +24,64 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+typedef void (*shadow_entry_exit_handler_fn)(struct kvm_vcpu *, struct kvm_vcpu *);
+
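+/*
+ * Propagate the host's pending exception-injection or PC-increment
+ * request to the shadow vcpu before (re-)entering the guest.
+ */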
+static void handle_vm_entry_generic(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
+{
+	unsigned long host_flags = READ_ONCE(host_vcpu->arch.flags);
+
+	shadow_vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+				     KVM_ARM64_EXCEPT_MASK);
+
+	if (host_flags & KVM_ARM64_PENDING_EXCEPTION) {
+		shadow_vcpu->arch.flags |= KVM_ARM64_PENDING_EXCEPTION;
+		shadow_vcpu->arch.flags |= host_flags & KVM_ARM64_EXCEPT_MASK;
+	} else if (host_flags & KVM_ARM64_INCREMENT_PC) {
+		shadow_vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+	}
+}
+
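+/* By default, only the exit syndrome is made visible to the host. */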
+static void handle_vm_exit_generic(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
+{
+	WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
+		   shadow_vcpu->arch.fault.esr_el2);
+}
+
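+/*
+ * Aborts additionally expose the fault address and related registers,
+ * which the host needs to handle the guest's stage-2 fault.
+ */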
+static void handle_vm_exit_abt(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
+{
+	WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
+		   shadow_vcpu->arch.fault.esr_el2);
+	WRITE_ONCE(host_vcpu->arch.fault.far_el2,
+		   shadow_vcpu->arch.fault.far_el2);
+	WRITE_ONCE(host_vcpu->arch.fault.hpfar_el2,
+		   shadow_vcpu->arch.fault.hpfar_el2);
+	WRITE_ONCE(host_vcpu->arch.fault.disr_el1,
+		   shadow_vcpu->arch.fault.disr_el1);
+}
+
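+/*
+ * Entry/exit handlers, indexed by exception class (ESR_ELx_EC_*); the
+ * range initializer installs the generic handler for every class.
+ */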
+static const shadow_entry_exit_handler_fn entry_vm_shadow_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX] = handle_vm_entry_generic,
+};
+
+static const shadow_entry_exit_handler_fn exit_vm_shadow_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX] = handle_vm_exit_generic,
+	[ESR_ELx_EC_IABT_LOW] = handle_vm_exit_abt,
+	[ESR_ELx_EC_DABT_LOW] = handle_vm_exit_abt,
+};
+
static void flush_vgic_state(struct kvm_vcpu *host_vcpu,
			     struct kvm_vcpu *shadow_vcpu)
{
@@ -99,6 +157,8 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
{
	struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
	struct kvm_vcpu *host_vcpu = shadow_state->host_vcpu;
+	shadow_entry_exit_handler_fn ec_handler;
+	u8 esr_ec;

	shadow_vcpu->arch.ctxt = host_vcpu->arch.ctxt;
@@ -109,8 +169,6 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
	shadow_vcpu->arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
	shadow_vcpu->arch.cptr_el2 = host_vcpu->arch.cptr_el2;

-	shadow_vcpu->arch.flags = host_vcpu->arch.flags;
-
	shadow_vcpu->arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
	shadow_vcpu->arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
@@ -118,24 +176,75 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
	flush_vgic_state(host_vcpu, shadow_vcpu);
	flush_timer_state(shadow_state);
+
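+	/*
+	 * Dispatch on the code of the previous exit to apply any state the
+	 * host may have updated for the shadow vcpu while it was out.
+	 */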
+	switch (ARM_EXCEPTION_CODE(shadow_state->exit_code)) {
+	case ARM_EXCEPTION_IRQ:
+	case ARM_EXCEPTION_EL1_SERROR:
+	case ARM_EXCEPTION_IL:
+		break;
+	case ARM_EXCEPTION_TRAP:
+		esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(shadow_vcpu));
+		ec_handler = entry_vm_shadow_handlers[esr_ec];
+		if (ec_handler)
+			ec_handler(host_vcpu, shadow_vcpu);
+		break;
+	default:
+		BUG();
+	}
+
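+	/* The stashed exit code has been consumed; clear it for the next run. */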
+	shadow_state->exit_code = 0;
}

-static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
+static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state,
+			      u32 exit_reason)
{
	struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
	struct kvm_vcpu *host_vcpu = shadow_state->host_vcpu;
+	shadow_entry_exit_handler_fn ec_handler;
+	unsigned long host_flags;
+	u8 esr_ec;

	host_vcpu->arch.ctxt = shadow_vcpu->arch.ctxt;

	host_vcpu->arch.hcr_el2 = shadow_vcpu->arch.hcr_el2;
	host_vcpu->arch.cptr_el2 = shadow_vcpu->arch.cptr_el2;

-	host_vcpu->arch.fault = shadow_vcpu->arch.fault;
-
-	host_vcpu->arch.flags = shadow_vcpu->arch.flags;
-
	sync_vgic_state(host_vcpu, shadow_vcpu);
	sync_timer_state(shadow_state);
+
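+	/*
+	 * Copy back only the exit state the host needs to see, selected by
+	 * the exception class of this exit.
+	 */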
+	switch (ARM_EXCEPTION_CODE(exit_reason)) {
+	case ARM_EXCEPTION_IRQ:
+		break;
+	case ARM_EXCEPTION_TRAP:
+		esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(shadow_vcpu));
+		ec_handler = exit_vm_shadow_handlers[esr_ec];
+		if (ec_handler)
+			ec_handler(host_vcpu, shadow_vcpu);
+		break;
+	case ARM_EXCEPTION_EL1_SERROR:
+	case ARM_EXCEPTION_IL:
+		break;
+	default:
+		BUG();
+	}
+
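+	/*
+	 * The injection/PC-update requests were consumed on entry, so
+	 * clear them from the host's view of the flags.
+	 */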
+	host_flags = READ_ONCE(host_vcpu->arch.flags) &
+		     ~(KVM_ARM64_PENDING_EXCEPTION | KVM_ARM64_INCREMENT_PC);
+	WRITE_ONCE(host_vcpu->arch.flags, host_flags);
+	shadow_state->exit_code = exit_reason;
}
static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
@@ -231,7 +340,11 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
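+		/*
+		 * Run the shadow vcpu, then publish the exit state that the
+		 * host is allowed to observe via sync_shadow_state().
+		 */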
		ret = __kvm_vcpu_run(&shadow_state->shadow_vcpu);

-		sync_shadow_state(shadow_state);
+		sync_shadow_state(shadow_state, ret);
	} else {
		ret = __kvm_vcpu_run(vcpu);
	}