[v3,05/15] arm64: kvm: Build hyp-entry.S separately for VHE/nVHE

Message ID 20200618122537.9625-6-dbrazdil@google.com (mailing list archive)
State New, archived
Headers show
Series Split off nVHE hyp code

Commit Message

David Brazdil June 18, 2020, 12:25 p.m. UTC
This patch is part of a series which builds KVM's non-VHE hyp code separately
from VHE and the rest of the kernel.

hyp-entry.S contains the implementation of the KVM hyp vectors. This code is
mostly shared between VHE and nVHE, and is therefore compiled under both sets
of build rules. The nVHE-specific host HVC handler is hidden behind
__KVM_NVHE_HYPERVISOR__.

Adjust the code that selects which KVM hyp vectors to install so that it
chooses the correct VHE/nVHE symbol.
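
For reference, the symbol duality builds on the prefixing macros introduced
earlier in this series (a simplified sketch; the exact definitions live in
arch/arm64/include/asm/kvm_asm.h):

  /* nVHE code gets its symbols prefixed with __kvm_nvhe_ at link time. */
  #define kvm_nvhe_sym(sym)		__kvm_nvhe_##sym
  #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
  #define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
  /* Declare both aliases of a symbol shared by the two worlds. */
  #define DECLARE_KVM_HYP_SYM(sym)	\
  	DECLARE_KVM_VHE_SYM(sym);	\
  	DECLARE_KVM_NVHE_SYM(sym)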

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_asm.h |  7 ++++++-
 arch/arm64/include/asm/kvm_mmu.h | 16 ++++++++++------
 arch/arm64/include/asm/mmu.h     |  7 -------
 arch/arm64/kernel/cpu_errata.c   |  4 +++-
 arch/arm64/kernel/image-vars.h   | 12 ++++++++++++
 arch/arm64/kvm/hyp/hyp-entry.S   |  2 ++
 arch/arm64/kvm/hyp/nvhe/Makefile |  2 +-
 arch/arm64/kvm/va_layout.c       |  2 +-
 8 files changed, 35 insertions(+), 17 deletions(-)

Comments

Marc Zyngier June 18, 2020, 5:17 p.m. UTC | #1
Hi David,

On 2020-06-18 13:25, David Brazdil wrote:
> This patch is part of a series which builds KVM's non-VHE hyp code
> separately from VHE and the rest of the kernel.

The above comment doesn't really belong here, and is only fit for the
cover letter.

> 
> hyp-entry.S contains the implementation of the KVM hyp vectors. This code
> is mostly shared between VHE and nVHE, and is therefore compiled under
> both sets of build rules. The nVHE-specific host HVC handler is hidden
> behind __KVM_NVHE_HYPERVISOR__.
> 
> Adjust the code that selects which KVM hyp vectors to install so that it
> chooses the correct VHE/nVHE symbol.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/kvm_asm.h |  7 ++++++-
>  arch/arm64/include/asm/kvm_mmu.h | 16 ++++++++++------
>  arch/arm64/include/asm/mmu.h     |  7 -------
>  arch/arm64/kernel/cpu_errata.c   |  4 +++-
>  arch/arm64/kernel/image-vars.h   | 12 ++++++++++++
>  arch/arm64/kvm/hyp/hyp-entry.S   |  2 ++
>  arch/arm64/kvm/hyp/nvhe/Makefile |  2 +-
>  arch/arm64/kvm/va_layout.c       |  2 +-
>  8 files changed, 35 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 6a682d66a640..2baa69324cc9 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -76,7 +76,12 @@ struct kvm_vcpu;
>  extern char __kvm_hyp_init[];
>  extern char __kvm_hyp_init_end[];
> 
> -extern char __kvm_hyp_vector[];
> +DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
> +
> +#ifdef CONFIG_KVM_INDIRECT_VECTORS
> +DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
> +extern atomic_t arm64_el2_vector_last_slot;
> +#endif
> 
>  extern void __kvm_flush_vm_context(void);
>  extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index b12bfc1f051a..5bfc7ee61997 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -461,11 +461,15 @@ extern int __kvm_harden_el2_vector_slot;
>  static inline void *kvm_get_hyp_vector(void)
>  {
>  	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> -	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>  	int slot = -1;
> +	void *vect = kern_hyp_va(has_vhe()
> +		? kvm_ksym_ref(__kvm_hyp_vector)
> +		: kvm_ksym_ref_nvhe(__kvm_hyp_vector));

If you are introducing has_vhe() at this stage, then you might as well
not apply kern_hyp_va() to the address. This also makes the declaration
block look a bit ugly (yes, I'm a bit of a maniac). I'd rather see
something like:

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 5bfc7ee61997..e915c47744bc 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -457,19 +457,25 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  extern void *__kvm_bp_vect_base;
  extern int __kvm_harden_el2_vector_slot;

+#define get_hyp_vector_address(v)				\
+({								\
+	void *__v;						\
+	if (has_vhe())						\
+		__v = kvm_ksym_ref(v);				\
+	else							\
+		__v = kern_hyp_va(kvm_ksym_ref_nvhe(v));	\
+	__v;							\
+})
+
  /*  This is called on both VHE and !VHE systems */
  static inline void *kvm_get_hyp_vector(void)
  {
  	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+	void *vect = get_hyp_vector_address(__kvm_hyp_vector);
  	int slot = -1;
-	void *vect = kern_hyp_va(has_vhe()
-		? kvm_ksym_ref(__kvm_hyp_vector)
-		: kvm_ksym_ref_nvhe(__kvm_hyp_vector));

  	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-		vect = kern_hyp_va(has_vhe()
-			? kvm_ksym_ref(__bp_harden_hyp_vecs)
-			: kvm_ksym_ref_nvhe(__bp_harden_hyp_vecs));
+		vect = get_hyp_vector_address(__bp_harden_hyp_vecs);
  		slot = data->hyp_vectors_slot;
  	}

> 
>  	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
> -		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
> +		vect = kern_hyp_va(has_vhe()
> +			? kvm_ksym_ref(__bp_harden_hyp_vecs)
> +			: kvm_ksym_ref_nvhe(__bp_harden_hyp_vecs));
>  		slot = data->hyp_vectors_slot;
>  	}
> 
> @@ -494,12 +498,11 @@ static inline int kvm_map_vectors(void)
>  	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
>  	 */
>  	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> -		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
> -		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
> +		__kvm_bp_vect_base = kern_hyp_va(kvm_ksym_ref_nvhe(__bp_harden_hyp_vecs));
>  	}
> 
>  	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> -		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
> +		phys_addr_t vect_pa = __pa_symbol(kvm_nvhe_sym(__bp_harden_hyp_vecs));
>  		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
> 
>  		/*
> @@ -518,7 +521,8 @@ static inline int kvm_map_vectors(void)
>  #else
>  static inline void *kvm_get_hyp_vector(void)
>  {
> -	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
> +	return kern_hyp_va(has_vhe() ? kvm_ksym_ref(__kvm_hyp_vector)
> +				     : kvm_ksym_ref_nvhe(__kvm_hyp_vector));
>  }
> 
>  static inline int kvm_map_vectors(void)
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index 68140fdd89d6..4d913f6dd366 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -42,13 +42,6 @@ struct bp_hardening_data {
>  	bp_hardening_cb_t	fn;
>  };
> 
> -#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
> -     defined(CONFIG_HARDEN_EL2_VECTORS))
> -
> -extern char __bp_harden_hyp_vecs[];
> -extern atomic_t arm64_el2_vector_last_slot;
> -#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
> -
>  #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
>  DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
> 
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index ad06d6802d2e..318b76a62c56 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -117,7 +117,9 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
>  static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
>  				const char *hyp_vecs_end)
>  {
> -	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
> +	char *vec = has_vhe() ? __bp_harden_hyp_vecs
> +			      : kvm_nvhe_sym(__bp_harden_hyp_vecs);

If we get this construct often, then something that abstracts
the ugliness of the symbol duality would be nice...

> +	void *dst = lm_alias(vec + slot * SZ_2K);
>  	int i;
> 
>  	for (i = 0; i < SZ_2K; i += 0x80)
> diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
> index 89affa38b143..dc7ee85531f5 100644
> --- a/arch/arm64/kernel/image-vars.h
> +++ b/arch/arm64/kernel/image-vars.h
> @@ -61,9 +61,11 @@ __efistub__ctype		= _ctype;
>   * memory mappings.
>   */
> 
> +__kvm_nvhe___guest_exit = __guest_exit;
>  __kvm_nvhe___kvm_enable_ssbs = __kvm_enable_ssbs;
>  __kvm_nvhe___kvm_flush_vm_context = __kvm_flush_vm_context;
>  __kvm_nvhe___kvm_get_mdcr_el2 = __kvm_get_mdcr_el2;
> +__kvm_nvhe___kvm_handle_stub_hvc = __kvm_handle_stub_hvc;
>  __kvm_nvhe___kvm_timer_set_cntvoff = __kvm_timer_set_cntvoff;
>  __kvm_nvhe___kvm_tlb_flush_local_vmid = __kvm_tlb_flush_local_vmid;
>  __kvm_nvhe___kvm_tlb_flush_vmid = __kvm_tlb_flush_vmid;
> @@ -75,6 +77,16 @@ __kvm_nvhe___vgic_v3_read_vmcr = __vgic_v3_read_vmcr;
>  __kvm_nvhe___vgic_v3_restore_aprs = __vgic_v3_restore_aprs;
>  __kvm_nvhe___vgic_v3_save_aprs = __vgic_v3_save_aprs;
>  __kvm_nvhe___vgic_v3_write_vmcr = __vgic_v3_write_vmcr;
> +__kvm_nvhe_abort_guest_exit_end = abort_guest_exit_end;
> +__kvm_nvhe_abort_guest_exit_start = abort_guest_exit_start;
> +__kvm_nvhe_arm64_enable_wa2_handling = arm64_enable_wa2_handling;
> +__kvm_nvhe_arm64_ssbd_callback_required = arm64_ssbd_callback_required;
> +__kvm_nvhe_hyp_panic = hyp_panic;
> +__kvm_nvhe_kimage_voffset = kimage_voffset;
> +__kvm_nvhe_kvm_host_data = kvm_host_data;
> +__kvm_nvhe_kvm_patch_vector_branch = kvm_patch_vector_branch;
> +__kvm_nvhe_kvm_update_va_mask = kvm_update_va_mask;
> +__kvm_nvhe_panic = panic;
> 
>  #endif /* CONFIG_KVM */
> 
> diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
> index d362fad97cc8..7e3c72fa634f 100644
> --- a/arch/arm64/kvm/hyp/hyp-entry.S
> +++ b/arch/arm64/kvm/hyp/hyp-entry.S
> @@ -40,6 +40,7 @@ el1_sync:				// Guest trapped into EL2
>  	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
>  	b.ne	el1_trap
> 
> +#ifdef __KVM_NVHE_HYPERVISOR__
>  	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
>  	cbnz	x1, el1_hvc_guest	// called HVC
> 
> @@ -74,6 +75,7 @@ el1_sync:				// Guest trapped into EL2
> 
>  	eret
>  	sb
> +#endif /* __KVM_NVHE_HYPERVISOR__ */
> 
>  el1_hvc_guest:
>  	/*
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index 7d64235dba62..c68801e24950 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -7,7 +7,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__
>  ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -fno-stack-protector \
>  	     -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN)
> 
> -obj-y :=
> +obj-y := ../hyp-entry.o
> 
>  obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
>  extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index a4f48c1ac28c..157d106235f7 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -150,7 +150,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
>  	/*
>  	 * Compute HYP VA by using the same computation as kern_hyp_va()
>  	 */
> -	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
> +	addr = (uintptr_t)kvm_ksym_ref_nvhe(__kvm_hyp_vector);
>  	addr &= va_mask;
>  	addr |= tag_val << tag_lsb;

Thanks,

         M.
David Brazdil June 22, 2020, 10:20 a.m. UTC | #2
Hi Marc,

> > -	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
> > +	char *vec = has_vhe() ? __bp_harden_hyp_vecs
> > +			      : kvm_nvhe_sym(__bp_harden_hyp_vecs);
> 
> If we get this construct often, then something that abstracts
> the ugliness of the symbol duality would be nice...

Agreed, I do hope that this will end up being limited to finding the address of
the hyp-init vector once EL2 becomes self-contained. Even this vector selection
can be done in EL2, where the symbol duality does not exist.
If we were to hide it, there is a trade-off between code "elegance" and clarity
of what's happening under the hood. I was thinking we could extract this
`has_vhe() ? foo : __kvm_nvhe_foo` as a `#define foo`, but I do worry that
anybody debugging this code would be cursing my name. It would also not work
with other macros that take symbol names, notably kvm_ksym_ref. But that can be
rewritten to accept a pointer instead. The more verbose but less magic approach
is to have a bunch of different helpers for various situations, e.g.
__pa_symbol_nvhe (sketched below). What would be your preference?
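
For illustration, such a verbose helper might be as simple as (hypothetical
name, not part of this series):

/* Physical address of the nVHE alias of a hyp symbol. */
#define __pa_symbol_nvhe(sym)	__pa_symbol(kvm_nvhe_sym(sym))

i.e. just a named wrapper around the __pa_symbol(kvm_nvhe_sym(...)) pattern
this patch already uses in kvm_map_vectors().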

-David
Marc Zyngier June 25, 2020, 8:12 a.m. UTC | #3
Hi David,

On 2020-06-22 11:20, David Brazdil wrote:
> Hi Marc,
> 
>> > -	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
>> > +	char *vec = has_vhe() ? __bp_harden_hyp_vecs
>> > +			      : kvm_nvhe_sym(__bp_harden_hyp_vecs);
>> 
>> If we get this construct often, then something that abstracts
>> the ugliness of the symbol duality would be nice...
> 
> Agreed, I do hope that this will end up being limited to finding the
> address of the hyp-init vector once EL2 becomes self-contained. Even this
> vector selection can be done in EL2, where the symbol duality does not
> exist.
> If we were to hide it, there is a trade-off between code "elegance" and
> clarity of what's happening under the hood. I was thinking we could
> extract this `has_vhe() ? foo : __kvm_nvhe_foo` as a `#define foo`, but I
> do worry that anybody debugging this code would be cursing my name.

I would say that whoever is debugging this code better have an understanding
of how things are supposed to work. Given that this is only a handful of
people so far, I think your name is safe! ;-)

> It would also not work with other macros that take symbol names, notably
> kvm_ksym_ref. But that can be rewritten to accept a pointer instead. The
> more verbose but less magic approach is to have a bunch of different
> helpers for various situations, e.g. __pa_symbol_nvhe. What would be your
> preference?

I'd be happy with the (maybe temporary) magic approach. It helps reasoning
about things, and makes the transition smoother. Yes, bugs could crop up
there, but given the static nature of obtaining a symbol's address, I'm
fairly confident we'll get it right. The same cannot be said about pointers
though.

Thanks,

         M.
David Brazdil June 25, 2020, 11:53 a.m. UTC | #4
Hey Marc,

> I'd be happy with the (maybe temporary) magic approach. It helps reasoning
> about things, and makes the transition smoother. Yes, bugs could crop up
> there, but given the static nature of obtaining a symbol's address, I'm
> fairly confident we'll get it right. The same cannot be said about pointers
> though.

Ok, so this is what it would look like:

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 6a682d66a640..0d1b3b1946f0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -59,13 +59,14 @@
 #define DECLARE_KVM_HYP_SYM(sym)               \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)
+#define CHOOSE_HYP_SYM(sym) (has_vhe() ? sym : kvm_nvhe_sym(sym))

 /* Translate a kernel address of @sym into its equivalent linear mapping */
-#define kvm_ksym_ref(sym)                                              \
+#define kvm_ksym_ref(ptr)                                              \
        ({                                                              \
-               void *val = &sym;                                       \
+               void *val = (ptr);                                      \
                if (!is_kernel_in_hyp_mode())                           \
-                       val = lm_alias(&sym);                           \
+                       val = lm_alias((ptr));                          \
                val;                                                    \
         })
 #define kvm_ksym_ref_nvhe(sym) kvm_ksym_ref(kvm_nvhe_sym(sym))
@@ -76,7 +77,14 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];

-extern char __kvm_hyp_vector[];
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
+
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+extern atomic_t arm64_el2_vector_last_slot;
+#endif

 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);

Everything compiles and boots. Only existing code I had to change was:

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 90cb90561446..34b551385153 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1285,7 +1285,7 @@ static void cpu_init_hyp_mode(void)
         * so that we can use adr_l to access per-cpu variables in EL2.
         */
        tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-                    (unsigned long)kvm_ksym_ref(kvm_host_data));
+                    (unsigned long)kvm_ksym_ref(&kvm_host_data));

        pgd_ptr = kvm_mmu_get_httbr();
        hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;

WDYT?

Thanks,
-David

Patch

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 6a682d66a640..2baa69324cc9 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -76,7 +76,12 @@  struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+extern atomic_t arm64_el2_vector_last_slot;
+#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b12bfc1f051a..5bfc7ee61997 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -461,11 +461,15 @@  extern int __kvm_harden_el2_vector_slot;
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
-	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
+	void *vect = kern_hyp_va(has_vhe()
+		? kvm_ksym_ref(__kvm_hyp_vector)
+		: kvm_ksym_ref_nvhe(__kvm_hyp_vector));
 
 	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
+		vect = kern_hyp_va(has_vhe()
+			? kvm_ksym_ref(__bp_harden_hyp_vecs)
+			: kvm_ksym_ref_nvhe(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}
 
@@ -494,12 +498,11 @@  static inline int kvm_map_vectors(void)
 	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
 	 */
 	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+		__kvm_bp_vect_base = kern_hyp_va(kvm_ksym_ref_nvhe(__bp_harden_hyp_vecs));
 	}
 
 	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		phys_addr_t vect_pa = __pa_symbol(kvm_nvhe_sym(__bp_harden_hyp_vecs));
 		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
 
 		/*
@@ -518,7 +521,8 @@  static inline int kvm_map_vectors(void)
 #else
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+	return kern_hyp_va(has_vhe() ? kvm_ksym_ref(__kvm_hyp_vector)
+				     : kvm_ksym_ref_nvhe(__kvm_hyp_vector));
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 68140fdd89d6..4d913f6dd366 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -42,13 +42,6 @@  struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
-     defined(CONFIG_HARDEN_EL2_VECTORS))
-
-extern char __bp_harden_hyp_vecs[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ad06d6802d2e..318b76a62c56 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -117,7 +117,9 @@  DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
-	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
+	char *vec = has_vhe() ? __bp_harden_hyp_vecs
+			      : kvm_nvhe_sym(__bp_harden_hyp_vecs);
+	void *dst = lm_alias(vec + slot * SZ_2K);
 	int i;
 
 	for (i = 0; i < SZ_2K; i += 0x80)
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 89affa38b143..dc7ee85531f5 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -61,9 +61,11 @@  __efistub__ctype		= _ctype;
  * memory mappings.
  */
 
+__kvm_nvhe___guest_exit = __guest_exit;
 __kvm_nvhe___kvm_enable_ssbs = __kvm_enable_ssbs;
 __kvm_nvhe___kvm_flush_vm_context = __kvm_flush_vm_context;
 __kvm_nvhe___kvm_get_mdcr_el2 = __kvm_get_mdcr_el2;
+__kvm_nvhe___kvm_handle_stub_hvc = __kvm_handle_stub_hvc;
 __kvm_nvhe___kvm_timer_set_cntvoff = __kvm_timer_set_cntvoff;
 __kvm_nvhe___kvm_tlb_flush_local_vmid = __kvm_tlb_flush_local_vmid;
 __kvm_nvhe___kvm_tlb_flush_vmid = __kvm_tlb_flush_vmid;
@@ -75,6 +77,16 @@  __kvm_nvhe___vgic_v3_read_vmcr = __vgic_v3_read_vmcr;
 __kvm_nvhe___vgic_v3_restore_aprs = __vgic_v3_restore_aprs;
 __kvm_nvhe___vgic_v3_save_aprs = __vgic_v3_save_aprs;
 __kvm_nvhe___vgic_v3_write_vmcr = __vgic_v3_write_vmcr;
+__kvm_nvhe_abort_guest_exit_end = abort_guest_exit_end;
+__kvm_nvhe_abort_guest_exit_start = abort_guest_exit_start;
+__kvm_nvhe_arm64_enable_wa2_handling = arm64_enable_wa2_handling;
+__kvm_nvhe_arm64_ssbd_callback_required = arm64_ssbd_callback_required;
+__kvm_nvhe_hyp_panic = hyp_panic;
+__kvm_nvhe_kimage_voffset = kimage_voffset;
+__kvm_nvhe_kvm_host_data = kvm_host_data;
+__kvm_nvhe_kvm_patch_vector_branch = kvm_patch_vector_branch;
+__kvm_nvhe_kvm_update_va_mask = kvm_update_va_mask;
+__kvm_nvhe_panic = panic;
 
 #endif /* CONFIG_KVM */
 
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index d362fad97cc8..7e3c72fa634f 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -40,6 +40,7 @@  el1_sync:				// Guest trapped into EL2
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
 
+#ifdef __KVM_NVHE_HYPERVISOR__
 	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
 	cbnz	x1, el1_hvc_guest	// called HVC
 
@@ -74,6 +75,7 @@  el1_sync:				// Guest trapped into EL2
 
 	eret
 	sb
+#endif /* __KVM_NVHE_HYPERVISOR__ */
 
 el1_hvc_guest:
 	/*
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 7d64235dba62..c68801e24950 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -7,7 +7,7 @@  asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -fno-stack-protector \
 	     -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-y :=
+obj-y := ../hyp-entry.o
 
 obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
 extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index a4f48c1ac28c..157d106235f7 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -150,7 +150,7 @@  void kvm_patch_vector_branch(struct alt_instr *alt,
 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
 	 */
-	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
+	addr = (uintptr_t)kvm_ksym_ref_nvhe(__kvm_hyp_vector);
 	addr &= va_mask;
 	addr |= tag_val << tag_lsb;