[v2,03/14] arm64: kvm: Formalize hypcall ABI

Message ID 20200515105841.73532-4-dbrazdil@google.com (mailing list archive)
State New, archived
Series Split off nVHE hyp code

Commit Message

David Brazdil May 15, 2020, 10:58 a.m. UTC
In preparation for unmapping .hyp.text from the kernel memory mappings,
convert the current EL1-to-EL2 KVM ABI to use hypercall numbers in lieu of
direct function pointers. While this in itself does not provide any isolation,
it is a first step towards a well-defined EL2 ABI.
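
For illustration, the mapping works roughly as follows: each hyp function is
assigned a small integer ID in the new kvm_host_hypercalls.h below, and the
host passes that ID (offset past the hyp stub range) in x0 instead of a
function pointer:

  /* Per-function ID, as defined in kvm_host_hypercalls.h: */
  #define __KVM_HOST_HCALL_ID___kvm_flush_vm_context	4

  /* Host-side macro that offsets it past the hyp stub range: */
  #define KVM_HOST_HCALL_BASE	(0x1000UL)
  #define KVM_HOST_HCALL_ID(name) \
  	(KVM_HOST_HCALL_BASE + __KVM_HOST_HCALL_ID_##name)

  /* kvm_call_hyp(__kvm_flush_vm_context) therefore issues an HVC with
   * x0 == KVM_HOST_HCALL_ID(__kvm_flush_vm_context) == 0x1004. */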

The implementation is based on a jump table of branches to the known host HVC
handlers, indexed by hypercall ID. Relative-offset branches were chosen over
a sys_call_table-like array of function pointers to avoid having to recompute
the addresses under the hyp memory mappings.
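
The header doubles as an X-macro list: besides defining the IDs, each entry
invokes __KVM_HOST_HCALL(fn), which defaults to a no-op. hyp-entry.S
redefines that macro to emit a relative branch to fn and re-includes the
header, so the jump table is generated in ID order and stays in sync with the
ID assignments. The same pattern could be reused elsewhere; as a purely
illustrative sketch (not part of this patch), a host-side table of hypercall
names could be generated the same way:

  #include <asm/kvm_host_hypercalls.h>	/* IDs and KVM_HOST_HCALL_NR */

  /* Illustrative only: expand each entry to a designated initializer. */
  #undef __KVM_HOST_HCALL
  #define __KVM_HOST_HCALL(fn)	[__KVM_HOST_HCALL_ID_##fn] = #fn,

  static const char *const host_hcall_names[KVM_HOST_HCALL_NR] = {
  #include <asm/kvm_host_hypercalls.h>	/* one entry per hypercall */
  };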

Hypcall IDs start at 0x1000 because comments in hyp.S state that lower values
are reserved for hyp stub operations. This was not previously honored by
hyp-entry.S: only the IDs actually in use were recognized, and any other value
was treated as a function pointer. This is now cleaned up, and all IDs lower
than 0x1000 are routed to __kvm_handle_stub_hvc.
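
With that, the EL2 dispatch in hyp-entry.S reduces to a three-way decision on
the value in x0. Restated in C for clarity (route_hcall and the enum are
illustrative names, not part of the patch):

  #include <asm/kvm_host_hypercalls.h>

  enum hcall_route { ROUTE_HYP_STUB, ROUTE_HOST_HCALL, ROUTE_INVALID };

  /* Illustrative restatement of the el1_sync/el1_host_hcall checks. */
  static enum hcall_route route_hcall(unsigned long id)
  {
          if (id < KVM_HOST_HCALL_BASE)                   /* 0x0 - 0xfff */
                  return ROUTE_HYP_STUB;                  /* __kvm_handle_stub_hvc */
          if (id - KVM_HOST_HCALL_BASE < KVM_HOST_HCALL_NR)
                  return ROUTE_HOST_HCALL;                /* jump table entry */
          return ROUTE_INVALID;                           /* x0 = -ENOSYS, eret */
  }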

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_host.h            | 10 ++--
 arch/arm64/include/asm/kvm_host_hypercalls.h | 59 ++++++++++++++++++++
 arch/arm64/kvm/hyp.S                         | 18 +++---
 arch/arm64/kvm/hyp/hyp-entry.S               | 56 +++++++++++--------
 4 files changed, 107 insertions(+), 36 deletions(-)
 create mode 100644 arch/arm64/include/asm/kvm_host_hypercalls.h

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 32c8a675e5a4..132233b6d853 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@ 
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
+#include <asm/kvm_host_hypercalls.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -446,7 +447,7 @@  int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
-u64 __kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(unsigned long hcall_id, ...);
 
 /*
  * The couple of isb() below are there to guarantee the same behaviour
@@ -459,7 +460,8 @@  u64 __kvm_call_hyp(void *hypfn, ...);
 			f(__VA_ARGS__);					\
 			isb();						\
 		} else {						\
-			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+			__kvm_call_hyp(KVM_HOST_HCALL_ID(f),		\
+				       ##__VA_ARGS__);			\
 		}							\
 	} while(0)
 
@@ -471,7 +473,7 @@  u64 __kvm_call_hyp(void *hypfn, ...);
 			ret = f(__VA_ARGS__);				\
 			isb();						\
 		} else {						\
-			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
+			ret = __kvm_call_hyp(KVM_HOST_HCALL_ID(f),	\
 					     ##__VA_ARGS__);		\
 		}							\
 									\
@@ -551,7 +553,7 @@  static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 * cpus_have_const_cap() wrapper.
 	 */
 	BUG_ON(!system_capabilities_finalized());
-	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+	__kvm_call_hyp((unsigned long)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
 
 	/*
 	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
diff --git a/arch/arm64/include/asm/kvm_host_hypercalls.h b/arch/arm64/include/asm/kvm_host_hypercalls.h
new file mode 100644
index 000000000000..ed02878fbda5
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_host_hypercalls.h
@@ -0,0 +1,59 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, inc
+ */
+
+#ifndef __KVM_HOST_HCALL
+#define __KVM_HOST_HCALL(x)
+#endif
+
+#define __KVM_HOST_HCALL_ID___kvm_enable_ssbs			0
+__KVM_HOST_HCALL(__kvm_enable_ssbs)
+
+#define __KVM_HOST_HCALL_ID___kvm_get_mdcr_el2			1
+__KVM_HOST_HCALL(__kvm_get_mdcr_el2)
+
+#define __KVM_HOST_HCALL_ID___kvm_timer_set_cntvoff		2
+__KVM_HOST_HCALL(__kvm_timer_set_cntvoff)
+
+#define __KVM_HOST_HCALL_ID___kvm_tlb_flush_local_vmid		3
+__KVM_HOST_HCALL(__kvm_tlb_flush_local_vmid)
+
+#define __KVM_HOST_HCALL_ID___kvm_flush_vm_context		4
+__KVM_HOST_HCALL(__kvm_flush_vm_context)
+
+#define __KVM_HOST_HCALL_ID___kvm_vcpu_run_nvhe			5
+__KVM_HOST_HCALL(__kvm_vcpu_run_nvhe)
+
+#define __KVM_HOST_HCALL_ID___kvm_tlb_flush_vmid		6
+__KVM_HOST_HCALL(__kvm_tlb_flush_vmid)
+
+#define __KVM_HOST_HCALL_ID___kvm_tlb_flush_vmid_ipa		7
+__KVM_HOST_HCALL(__kvm_tlb_flush_vmid_ipa)
+
+#define __KVM_HOST_HCALL_ID___vgic_v3_init_lrs			8
+__KVM_HOST_HCALL(__vgic_v3_init_lrs)
+
+#define __KVM_HOST_HCALL_ID___vgic_v3_get_ich_vtr_el2		9
+__KVM_HOST_HCALL(__vgic_v3_get_ich_vtr_el2)
+
+#define __KVM_HOST_HCALL_ID___vgic_v3_write_vmcr		10
+__KVM_HOST_HCALL(__vgic_v3_write_vmcr)
+
+#define __KVM_HOST_HCALL_ID___vgic_v3_restore_aprs		11
+__KVM_HOST_HCALL(__vgic_v3_restore_aprs)
+
+#define __KVM_HOST_HCALL_ID___vgic_v3_read_vmcr			12
+__KVM_HOST_HCALL(__vgic_v3_read_vmcr)
+
+#define __KVM_HOST_HCALL_ID___vgic_v3_save_aprs			13
+__KVM_HOST_HCALL(__vgic_v3_save_aprs)
+
+#define KVM_HOST_HCALL_NR					14
+
+/*
+ * Offset KVM hypercall IDs to avoid clashing with stub hypercalls
+ * (defined in asm/virt.h).
+ */
+#define KVM_HOST_HCALL_BASE 	(0x1000UL)
+#define KVM_HOST_HCALL_ID(name) (KVM_HOST_HCALL_BASE + __KVM_HOST_HCALL_ID_##name)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3c79a1124af2..f603d03cb599 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -11,22 +11,20 @@ 
 #include <asm/cpufeature.h>
 
 /*
- * u64 __kvm_call_hyp(void *hypfn, ...);
+ * u64 __kvm_call_hyp(unsigned long hcall_id, ...);
  *
  * This is not really a variadic function in the classic C-way and care must
  * be taken when calling this to ensure parameters are passed in registers
  * only, since the stack will change between the caller and the callee.
  *
- * Call the function with the first argument containing a pointer to the
- * function you wish to call in Hyp mode, and subsequent arguments will be
- * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
- * function pointer can be passed).  The function being called must be mapped
- * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in x0.
+ * Call the function with the first argument containing ID of the function
+ * you wish to call in Hyp mode, as defined in kvm_host_hypercalls.h, and
+ * subsequent arguments will be passed as x0, x1, and x2 (a maximum of
+ * 3 arguments in addition to the hypcall ID can be passed). Return values
+ * are passed in x0.
  *
- * A function pointer with a value less than 0xfff has a special meaning,
- * and is used to implement hyp stubs in the same way as in
- * arch/arm64/kernel/hyp_stub.S.
+ * Hypcalls with ID less than 0x1000 are propagated to operations implemented
+ * in arch/arm64/kernel/hyp_stub.S.
  */
 SYM_FUNC_START(__kvm_call_hyp)
 	hvc	#0
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 65ff99a7e02d..ab14de8d0131 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -13,25 +13,12 @@ 
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_host_hypercalls.h>
 #include <asm/mmu.h>
 
 	.text
 	.pushsection	.hyp.text, "ax"
 
-.macro do_el2_call
-	/*
-	 * Shuffle the parameters before calling the function
-	 * pointed to in x0. Assumes parameters in x[1,2,3].
-	 */
-	str	lr, [sp, #-16]!
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
-	ldr	lr, [sp], #16
-.endm
-
 el1_sync:				// Guest trapped into EL2
 
 	mrs	x0, esr_el2
@@ -46,9 +33,9 @@  el1_sync:				// Guest trapped into EL2
 	/* Here, we're pretty sure the host called HVC. */
 	ldp	x0, x1, [sp], #16
 
-	/* Check for a stub HVC call */
-	cmp	x0, #HVC_STUB_HCALL_NR
-	b.hs	1f
+	/* Check if hcall ID (x0) is in the hyp stub hypercall range. */
+	cmp	x0, #KVM_HOST_HCALL_BASE
+	b.hs	el1_host_hcall
 
 	/*
 	 * Compute the idmap address of __kvm_handle_stub_hvc and
@@ -65,13 +52,38 @@  el1_sync:				// Guest trapped into EL2
 	sub	x5, x5, x6
 	br	x5
 
+el1_host_hcall:
+	/* Check if hcall ID (x0) is in the KVM host hypercall range. */
+	sub	x0, x0, #KVM_HOST_HCALL_BASE
+	cmp	x0, #KVM_HOST_HCALL_NR
+	b.hs	el1_host_invalid_hvc
+
+	/* Compute address of corresponding branch in the jump table below. */
+	adr	x10, 1f
+	add	x10, x10, x0, lsl #2
+
+	/* Call the host HVC handler. Arguments are in x[1,2,3]. */
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	str	lr, [sp, #-16]!
+	adr	lr, 2f
+	br	x10
+
+	/* Generate jump table of branches to all defined host HVC handlers. */
 1:
-	/*
-	 * Perform the EL2 call
-	 */
-	kern_hyp_va	x0
-	do_el2_call
+#undef __KVM_HOST_HCALL
+#define __KVM_HOST_HCALL(hcall_fn_name) \
+	b	hcall_fn_name
+#include <asm/kvm_host_hypercalls.h>
+
+2:
+	ldr	lr, [sp], #16
+	eret
+	sb
 
+el1_host_invalid_hvc:
+	mov	x0, -ENOSYS
 	eret
 	sb