
[v3,04/15] arm64: kvm: Handle calls to prefixed hyp functions

Message ID 20200618122537.9625-5-dbrazdil@google.com (mailing list archive)
State New, archived
Series: Split off nVHE hyp code

Commit Message

David Brazdil June 18, 2020, 12:25 p.m. UTC
From: Andrew Scull <ascull@google.com>

This patch is part of a series which builds KVM's non-VHE hyp code separately
from VHE and the rest of the kernel.

Once hyp functions are moved to a hyp object, they will have prefixed symbols.
This change declares and gets the address of the prefixed version for calls to
the hyp functions.

To aid migration, the hyp functions that have not yet moved have their prefixed
versions aliased to their non-prefixed version. This begins with all the hyp
functions being listed and will reduce to none of them once the migration is
complete.

Signed-off-by: Andrew Scull <ascull@google.com>

Extracted kvm_call_hyp nVHE branches into own helper macros.
Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_asm.h  | 19 +++++++++++++++++++
 arch/arm64/include/asm/kvm_host.h | 19 ++++++++++++++++---
 arch/arm64/kernel/image-vars.h    | 15 +++++++++++++++
 3 files changed, 50 insertions(+), 3 deletions(-)
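
As a rough illustration of the naming scheme (the symbol is one of those aliased in image-vars.h below; the expansion follows the kvm_asm.h hunk in this patch):

/* In kernel proper, declare the hyp-side symbol by its prefixed name: */
DECLARE_KVM_NVHE_SYM(__kvm_flush_vm_context);
/* expands to: extern char __kvm_nvhe___kvm_flush_vm_context[]; */

/*
 * Until __kvm_flush_vm_context moves into the nVHE object, the
 * image-vars.h alias
 *     __kvm_nvhe___kvm_flush_vm_context = __kvm_flush_vm_context;
 * makes the prefixed reference resolve to the existing implementation;
 * once it moves, the alias is dropped and the prefixed symbol is
 * provided by the hyp build itself.
 */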

Comments

Marc Zyngier June 18, 2020, 4:51 p.m. UTC | #1
Hi David,

On 2020-06-18 13:25, David Brazdil wrote:
> From: Andrew Scull <ascull@google.com>
> 
> This patch is part of a series which builds KVM's non-VHE hyp code
> separately from VHE and the rest of the kernel.
> 
> Once hyp functions are moved to a hyp object, they will have prefixed
> symbols. This change declares and gets the address of the prefixed
> version for calls to the hyp functions.
> 
> To aid migration, the hyp functions that have not yet moved have their
> prefixed versions aliased to their non-prefixed version. This begins
> with all the hyp functions being listed and will reduce to none of them
> once the migration is complete.
> 
> Signed-off-by: Andrew Scull <ascull@google.com>
> 
> Extracted kvm_call_hyp nVHE branches into own helper macros.
> Signed-off-by: David Brazdil <dbrazdil@google.com>

nit: if you want to add this kind of comment, try to write it between
square brackets, without blank lines in between:

Signed-off-by: Andrew Scull <ascull@google.com>
[David: Extracted kvm_call_hyp nVHE branches into own helper macros.]
Signed-off-by: David Brazdil <dbrazdil@google.com>

> ---
>  arch/arm64/include/asm/kvm_asm.h  | 19 +++++++++++++++++++
>  arch/arm64/include/asm/kvm_host.h | 19 ++++++++++++++++---
>  arch/arm64/kernel/image-vars.h    | 15 +++++++++++++++
>  3 files changed, 50 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 352aaebf4198..6a682d66a640 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -42,6 +42,24 @@
> 
>  #include <linux/mm.h>
> 
> +/*
> + * Translate name of a symbol defined in nVHE hyp to the name seen
> + * by kernel proper. All nVHE symbols are prefixed by the build system
> + * to avoid clashes with the VHE variants.
> + */
> +#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
> +
> +#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
> +#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
> +
> +/*
> + * Define a pair of symbols sharing the same name but one defined in
> + * VHE and the other in nVHE hyp implementations.
> + */
> +#define DECLARE_KVM_HYP_SYM(sym)		\
> +	DECLARE_KVM_VHE_SYM(sym);		\
> +	DECLARE_KVM_NVHE_SYM(sym)
> +
>  /* Translate a kernel address of @sym into its equivalent linear mapping */
>  #define kvm_ksym_ref(sym)						\
>  	({								\
> @@ -50,6 +68,7 @@
>  			val = lm_alias(&sym);				\
>  		val;							\
>  	 })
> +#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
> 
>  struct kvm;
>  struct kvm_vcpu;
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index c3e6fcc664b1..e782f98243d3 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -448,6 +448,20 @@ void kvm_arm_resume_guest(struct kvm *kvm);
> 
>  u64 __kvm_call_hyp(void *hypfn, ...);
> 
> +#define kvm_call_hyp_nvhe(f, ...)					\
> +	do {								\
> +		DECLARE_KVM_NVHE_SYM(f);				\

I wanted to move this out to __kvm_call_hyp, but the nVHE ssbs code
got in the way... Oh well.

> +		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
> +	} while(0)
> +
> +#define kvm_call_hyp_nvhe_ret(f, ...)					\
> +	({								\
> +		DECLARE_KVM_NVHE_SYM(f);				\
> +		typeof(f(__VA_ARGS__)) ret;				\
> +		ret = __kvm_call_hyp(kvm_ksym_ref_nvhe(f),		\
> +				     ##__VA_ARGS__);			\

You don't need to redefine ret here. Actually, you can just evaluate
the expression and let C do its magic:

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e782f98243d3..49d1a5cd8f8f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -457,9 +457,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
  #define kvm_call_hyp_nvhe_ret(f, ...)					\
  	({								\
  		DECLARE_KVM_NVHE_SYM(f);				\
-		typeof(f(__VA_ARGS__)) ret;				\
-		ret = __kvm_call_hyp(kvm_ksym_ref_nvhe(f),		\
-				     ##__VA_ARGS__);			\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
  	})

  /*

> +	})
> +
>  /*
>   * The couple of isb() below are there to guarantee the same behaviour
>   * on VHE as on !VHE, where the eret to EL1 acts as a context
> @@ -459,7 +473,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
>  			f(__VA_ARGS__);					\
>  			isb();						\
>  		} else {						\
> -			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
> +			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
>  		}							\
>  	} while(0)
> 
> @@ -471,8 +485,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
>  			ret = f(__VA_ARGS__);				\
>  			isb();						\
>  		} else {						\
> -			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
> -					     ##__VA_ARGS__);		\
> +			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
>  		}							\
>  									\
>  		ret;							\
> diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
> index f32b406e90c0..89affa38b143 100644
> --- a/arch/arm64/kernel/image-vars.h
> +++ b/arch/arm64/kernel/image-vars.h
> @@ -61,6 +61,21 @@ __efistub__ctype		= _ctype;
>   * memory mappings.
>   */
> 
> +__kvm_nvhe___kvm_enable_ssbs = __kvm_enable_ssbs;
> +__kvm_nvhe___kvm_flush_vm_context = __kvm_flush_vm_context;
> +__kvm_nvhe___kvm_get_mdcr_el2 = __kvm_get_mdcr_el2;
> +__kvm_nvhe___kvm_timer_set_cntvoff = __kvm_timer_set_cntvoff;
> +__kvm_nvhe___kvm_tlb_flush_local_vmid = __kvm_tlb_flush_local_vmid;
> +__kvm_nvhe___kvm_tlb_flush_vmid = __kvm_tlb_flush_vmid;
> +__kvm_nvhe___kvm_tlb_flush_vmid_ipa = __kvm_tlb_flush_vmid_ipa;
> +__kvm_nvhe___kvm_vcpu_run_nvhe = __kvm_vcpu_run_nvhe;
> +__kvm_nvhe___vgic_v3_get_ich_vtr_el2 = __vgic_v3_get_ich_vtr_el2;
> +__kvm_nvhe___vgic_v3_init_lrs = __vgic_v3_init_lrs;
> +__kvm_nvhe___vgic_v3_read_vmcr = __vgic_v3_read_vmcr;
> +__kvm_nvhe___vgic_v3_restore_aprs = __vgic_v3_restore_aprs;
> +__kvm_nvhe___vgic_v3_save_aprs = __vgic_v3_save_aprs;
> +__kvm_nvhe___vgic_v3_write_vmcr = __vgic_v3_write_vmcr;
> +
>  #endif /* CONFIG_KVM */
> 
>  #endif /* __ARM64_KERNEL_IMAGE_VARS_H */

Otherwise looks good.

Thanks,

         M.
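
For reference, the simplification above relies on GCC statement expressions: the value of a ({ ... }) block is that of its last expression, so the typeof temporary adds nothing. A minimal stand-alone sketch (the names here are illustrative, not the kernel's):

#include <stdio.h>

#define CALL_RET(f, ...)					\
	({							\
		/* the last expression is the block's value */	\
		f(__VA_ARGS__);					\
	})

static int add(int a, int b) { return a + b; }

int main(void)
{
	int r = CALL_RET(add, 2, 3);	/* r == 5 */
	printf("%d\n", r);
	return 0;
}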

Patch

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 352aaebf4198..6a682d66a640 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,6 +42,24 @@ 
 
 #include <linux/mm.h>
 
+/*
+ * Translate name of a symbol defined in nVHE hyp to the name seen
+ * by kernel proper. All nVHE symbols are prefixed by the build system
+ * to avoid clashes with the VHE variants.
+ */
+#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
+
+#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
+#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
+
+/*
+ * Define a pair of symbols sharing the same name but one defined in
+ * VHE and the other in nVHE hyp implementations.
+ */
+#define DECLARE_KVM_HYP_SYM(sym)		\
+	DECLARE_KVM_VHE_SYM(sym);		\
+	DECLARE_KVM_NVHE_SYM(sym)
+
 /* Translate a kernel address of @sym into its equivalent linear mapping */
 #define kvm_ksym_ref(sym)						\
 	({								\
@@ -50,6 +68,7 @@ 
 			val = lm_alias(&sym);				\
 		val;							\
 	 })
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
 
 struct kvm;
 struct kvm_vcpu;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..e782f98243d3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -448,6 +448,20 @@  void kvm_arm_resume_guest(struct kvm *kvm);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 
+#define kvm_call_hyp_nvhe(f, ...)					\
+	do {								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	} while(0)
+
+#define kvm_call_hyp_nvhe_ret(f, ...)					\
+	({								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		typeof(f(__VA_ARGS__)) ret;				\
+		ret = __kvm_call_hyp(kvm_ksym_ref_nvhe(f),		\
+				     ##__VA_ARGS__);			\
+	})
+
 /*
  * The couple of isb() below are there to guarantee the same behaviour
  * on VHE as on !VHE, where the eret to EL1 acts as a context
@@ -459,7 +473,7 @@  u64 __kvm_call_hyp(void *hypfn, ...);
 			f(__VA_ARGS__);					\
 			isb();						\
 		} else {						\
-			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
 		}							\
 	} while(0)
 
@@ -471,8 +485,7 @@  u64 __kvm_call_hyp(void *hypfn, ...);
 			ret = f(__VA_ARGS__);				\
 			isb();						\
 		} else {						\
-			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
-					     ##__VA_ARGS__);		\
+			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
 		}							\
 									\
 		ret;							\
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index f32b406e90c0..89affa38b143 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -61,6 +61,21 @@  __efistub__ctype		= _ctype;
  * memory mappings.
  */
 
+__kvm_nvhe___kvm_enable_ssbs = __kvm_enable_ssbs;
+__kvm_nvhe___kvm_flush_vm_context = __kvm_flush_vm_context;
+__kvm_nvhe___kvm_get_mdcr_el2 = __kvm_get_mdcr_el2;
+__kvm_nvhe___kvm_timer_set_cntvoff = __kvm_timer_set_cntvoff;
+__kvm_nvhe___kvm_tlb_flush_local_vmid = __kvm_tlb_flush_local_vmid;
+__kvm_nvhe___kvm_tlb_flush_vmid = __kvm_tlb_flush_vmid;
+__kvm_nvhe___kvm_tlb_flush_vmid_ipa = __kvm_tlb_flush_vmid_ipa;
+__kvm_nvhe___kvm_vcpu_run_nvhe = __kvm_vcpu_run_nvhe;
+__kvm_nvhe___vgic_v3_get_ich_vtr_el2 = __vgic_v3_get_ich_vtr_el2;
+__kvm_nvhe___vgic_v3_init_lrs = __vgic_v3_init_lrs;
+__kvm_nvhe___vgic_v3_read_vmcr = __vgic_v3_read_vmcr;
+__kvm_nvhe___vgic_v3_restore_aprs = __vgic_v3_restore_aprs;
+__kvm_nvhe___vgic_v3_save_aprs = __vgic_v3_save_aprs;
+__kvm_nvhe___vgic_v3_write_vmcr = __vgic_v3_write_vmcr;
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
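
Putting the pieces together, a !VHE call site ends up going through the new helpers; the expansion below is paraphrased from the kvm_host.h hunk (the call site itself is just an example):

/* Example call site in kernel proper: */
kvm_call_hyp(__kvm_flush_vm_context);

/*
 * On a !VHE system this takes the else branch and becomes
 * kvm_call_hyp_nvhe(__kvm_flush_vm_context), i.e. roughly:
 *
 *	do {
 *		extern char __kvm_nvhe___kvm_flush_vm_context[];
 *		__kvm_call_hyp(
 *			kvm_ksym_ref(__kvm_nvhe___kvm_flush_vm_context));
 *	} while (0);
 *
 * For now the prefixed symbol is only an alias, so the call still
 * reaches the existing __kvm_flush_vm_context implementation.
 */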