@@ -101,4 +101,6 @@ bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

+bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
+
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
@@ -4,6 +4,8 @@
* Author: Andrew Scull <ascull@google.com>
*/

+#include <kvm/arm_hypercalls.h>
+
#include <hyp/adjust_pc.h>

#include <asm/pgtable-types.h>
@@ -42,6 +44,13 @@ static void handle_pvm_entry_wfx(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *sh
KVM_ARM64_INCREMENT_PC;
}
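
+/*
+ * On re-entry after an HVC exit that the host handled, install the host's
+ * return value (x0) into the shadow vcpu so the guest observes it.
+ */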
+static void handle_pvm_entry_hvc64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
+{
+ u64 ret = READ_ONCE(host_vcpu->arch.ctxt.regs.regs[0]);
+
+ vcpu_set_reg(shadow_vcpu, 0, ret);
+}
+
static void handle_pvm_entry_sys64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
{
unsigned long host_flags;
@@ -195,6 +204,19 @@ static void handle_pvm_exit_sys64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *s
}
}
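
+/*
+ * Reflect an HVC exit back to the host: mirror the guest's ESR and copy the
+ * SMCCC function id and arguments (x0-x7) so the host can service the call.
+ */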
+static void handle_pvm_exit_hvc64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
+{
+ int i;
+
+ WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
+ shadow_vcpu->arch.fault.esr_el2);
+
+ /* Pass the HVC function id (x0) along with any potential arguments. */
+ for (i = 0; i < 8; i++)
+ WRITE_ONCE(host_vcpu->arch.ctxt.regs.regs[i],
+ vcpu_get_reg(shadow_vcpu, i));
+}
+
static void handle_pvm_exit_iabt(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
{
WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
@@ -273,6 +295,7 @@ static void handle_vm_exit_abt(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shad
static const shadow_entry_exit_handler_fn entry_pvm_shadow_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_WFx] = handle_pvm_entry_wfx,
+ [ESR_ELx_EC_HVC64] = handle_pvm_entry_hvc64,
[ESR_ELx_EC_SYS64] = handle_pvm_entry_sys64,
[ESR_ELx_EC_IABT_LOW] = handle_pvm_entry_iabt,
[ESR_ELx_EC_DABT_LOW] = handle_pvm_entry_dabt,
@@ -281,6 +304,7 @@ static const shadow_entry_exit_handler_fn entry_pvm_shadow_handlers[] = {
static const shadow_entry_exit_handler_fn exit_pvm_shadow_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_WFx] = handle_pvm_exit_wfx,
+ [ESR_ELx_EC_HVC64] = handle_pvm_exit_hvc64,
[ESR_ELx_EC_SYS64] = handle_pvm_exit_sys64,
[ESR_ELx_EC_IABT_LOW] = handle_pvm_exit_iabt,
[ESR_ELx_EC_DABT_LOW] = handle_pvm_exit_dabt,
@@ -7,6 +7,8 @@
#include <linux/kvm_host.h>
#include <linux/mm.h>

+#include <kvm/arm_hypercalls.h>
+
#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
@@ -797,3 +799,23 @@ int __pkvm_teardown_shadow(unsigned int shadow_handle)
hyp_spin_unlock(&shadow_lock);
return err;
}
+
+/*
+ * Handler for protected VM HVC calls.
+ *
+ * Returns true if the hypervisor handled the exit and control should return
+ * to the guest, or false if the exit must be forwarded to the host.
+ */
+bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ u32 fn = smccc_get_function(vcpu);
+
+ switch (fn) {
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ /* Nothing to be handled by the host. Go back to the guest. */
+ smccc_set_retval(vcpu, ARM_SMCCC_VERSION_1_1, 0, 0, 0);
+ return true;
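+ /* Anything else must be serviced by the host; forward the exit. */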
+ default:
+ return false;
+ }
+}
@@ -205,6 +205,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {

static const exit_handler_fn pvm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
+ [ESR_ELx_EC_HVC64] = kvm_handle_pvm_hvc64,
[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
[ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted,
[ESR_ELx_EC_FP_ASIMD] = kvm_handle_pvm_fpsimd,
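
Taken together, the flow for a protected guest hypercall is: the guest's
HVC traps to EL2, where kvm_handle_pvm_hvc64() either completes the call
directly (as for ARM_SMCCC_VERSION_FUNC_ID) or returns false; in the latter
case handle_pvm_exit_hvc64() exposes the ESR and x0-x7 to the host, and once
the host has serviced the call, handle_pvm_entry_hvc64() installs the host's
x0 into the shadow vcpu before re-entry. As a rough guest-side sketch (not
part of this patch; probe_smccc_version() is a made-up name and only the
generic arm-smccc helpers are assumed), an SMCCC version probe would now be
answered entirely at EL2:

#include <linux/arm-smccc.h>
#include <linux/errno.h>

/* Illustrative only: query the SMCCC version from the guest over HVC. */
static long probe_smccc_version(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if ((long)res.a0 == SMCCC_RET_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	/* With this patch, a protected VM sees ARM_SMCCC_VERSION_1_1 here. */
	return res.a0;
}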