
[08/11] arm64: KVM: Use per-CPU vector when BP hardening is enabled

Message ID 1515078515-13723-9-git-send-email-will.deacon@arm.com (mailing list archive)
State: New, archived

Commit Message

Will Deacon Jan. 4, 2018, 3:08 p.m. UTC
From: Marc Zyngier <marc.zyngier@arm.com>

Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm/include/asm/kvm_mmu.h   | 10 ++++++++++
 arch/arm64/include/asm/kvm_mmu.h | 38 ++++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/switch.c      |  2 +-
 virt/kvm/arm/arm.c               |  8 +++++++-
 4 files changed, 56 insertions(+), 2 deletions(-)

Comments

Ard Biesheuvel Jan. 4, 2018, 4:28 p.m. UTC | #1
On 4 January 2018 at 15:08, Will Deacon <will.deacon@arm.com> wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
>
> Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.
>

Why does bp hardening require per-cpu vectors?

> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> Signed-off-by: Will Deacon <will.deacon@arm.com>
> ---
>  arch/arm/include/asm/kvm_mmu.h   | 10 ++++++++++
>  arch/arm64/include/asm/kvm_mmu.h | 38 ++++++++++++++++++++++++++++++++++++++
>  arch/arm64/kvm/hyp/switch.c      |  2 +-
>  virt/kvm/arm/arm.c               |  8 +++++++-
>  4 files changed, 56 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index fa6f2174276b..eb46fc81a440 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
>         return 8;
>  }
>
> +static inline void *kvm_get_hyp_vector(void)
> +{
> +       return kvm_ksym_ref(__kvm_hyp_vector);
> +}
> +
> +static inline int kvm_map_vectors(void)
> +{
> +       return 0;
> +}
> +
>  #endif /* !__ASSEMBLY__ */
>
>  #endif /* __ARM_KVM_MMU_H__ */
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 672c8684d5c2..2d6d4bd9de52 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
>         return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>  }
>
> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#include <asm/mmu.h>
> +
> +static inline void *kvm_get_hyp_vector(void)
> +{
> +       struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> +       void *vect = kvm_ksym_ref(__kvm_hyp_vector);
> +
> +       if (data->fn) {
> +               vect = __bp_harden_hyp_vecs_start +
> +                      data->hyp_vectors_slot * SZ_2K;
> +
> +               if (!has_vhe())
> +                       vect = lm_alias(vect);
> +       }
> +
> +       return vect;
> +}
> +
> +static inline int kvm_map_vectors(void)
> +{
> +       return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +                                  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +                                  PAGE_HYP_EXEC);
> +}
> +
> +#else
> +static inline void *kvm_get_hyp_vector(void)
> +{
> +       return kvm_ksym_ref(__kvm_hyp_vector);
> +}
> +
> +static inline int kvm_map_vectors(void)
> +{
> +       return 0;
> +}
> +#endif
> +
>  #endif /* __ASSEMBLY__ */
>  #endif /* __ARM64_KVM_MMU_H__ */
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index f7c651f3a8c0..8d4f3c9d6dc4 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -52,7 +52,7 @@ static void __hyp_text __activate_traps_vhe(void)
>         val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
>         write_sysreg(val, cpacr_el1);
>
> -       write_sysreg(__kvm_hyp_vector, vbar_el1);
> +       write_sysreg(kvm_get_hyp_vector(), vbar_el1);
>  }
>
>  static void __hyp_text __activate_traps_nvhe(void)
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index 6b60c98a6e22..1c9fdb6db124 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -1158,7 +1158,7 @@ static void cpu_init_hyp_mode(void *dummy)
>         pgd_ptr = kvm_mmu_get_httbr();
>         stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
>         hyp_stack_ptr = stack_page + PAGE_SIZE;
> -       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
> +       vector_ptr = (unsigned long)kvm_get_hyp_vector();
>
>         __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
>         __cpu_init_stage2();
> @@ -1403,6 +1403,12 @@ static int init_hyp_mode(void)
>                 goto out_err;
>         }
>
> +       err = kvm_map_vectors();
> +       if (err) {
> +               kvm_err("Cannot map vectors\n");
> +               goto out_err;
> +       }
> +
>         /*
>          * Map the Hyp stack pages
>          */
> --
> 2.1.4
>
Marc Zyngier Jan. 4, 2018, 5:04 p.m. UTC | #2
On 04/01/18 16:28, Ard Biesheuvel wrote:
> On 4 January 2018 at 15:08, Will Deacon <will.deacon@arm.com> wrote:
>> From: Marc Zyngier <marc.zyngier@arm.com>
>>
>> Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.
>>
> 
> Why does bp hardening require per-cpu vectors?

The description is not 100% accurate. We have per *CPU type* vectors.
This stems from the following, slightly conflicting requirements:

- We have systems with more than one CPU type (think big.LITTLE)
- Different implementations require different BP hardening sequences
- The BP hardening sequence must be executed before doing any branch

The natural solution is to have one set of vectors per CPU type,
containing the BP hardening sequence for that particular implementation,
ending with a branch to the common code.

	M.
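
For reference, the per-CPU data that kvm_get_hyp_vector() consults lives
behind the <asm/mmu.h> include the patch adds. A minimal sketch of its
shape, reconstructed from the two fields this patch actually uses
(data->fn and data->hyp_vectors_slot); the typedef and the accessor body
are assumptions based on that usage, not quoted from the series:

	typedef void (*bp_hardening_cb_t)(void);

	struct bp_hardening_data {
		int			hyp_vectors_slot; /* index of this CPU type's 2K slot */
		bp_hardening_cb_t	fn;		  /* implementation-specific sequence */
	};

	DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

	static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
	{
		return this_cpu_ptr(&bp_hardening_data);
	}

Because the pointer is per CPU, each core of a given type can select the
slot carrying its own hardening sequence, which is what makes the mixed
big.LITTLE case above work.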
Ard Biesheuvel Jan. 4, 2018, 5:05 p.m. UTC | #3
On 4 January 2018 at 17:04, Marc Zyngier <marc.zyngier@arm.com> wrote:
> On 04/01/18 16:28, Ard Biesheuvel wrote:
>> On 4 January 2018 at 15:08, Will Deacon <will.deacon@arm.com> wrote:
>>> From: Marc Zyngier <marc.zyngier@arm.com>
>>>
>>> Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.
>>>
>>
>> Why does bp hardening require per-cpu vectors?
>
> The description is not 100% accurate. We have per *CPU type* vectors.
> This stems from the following, slightly conflicting requirements:
>
> - We have systems with more than one CPU type (think big.LITTLE)
> - Different implementations require different BP hardening sequences
> - The BP hardening sequence must be executed before doing any branch
>
> The natural solution is to have one set of vectors per CPU type,
> containing the BP hardening sequence for that particular implementation,
> ending with a branch to the common code.
>

Crystal clear, thanks.
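
To make the slot arithmetic in kvm_get_hyp_vector() concrete: each CPU
type's hardened vectors occupy one 2K slot inside the
__bp_harden_hyp_vecs section, and each slot runs that implementation's
hardening sequence before branching into the common __kvm_hyp_vector. A
freestanding sketch of the address computation only (the base address
is a made-up placeholder, not a real link address):

	#include <stdio.h>

	#define SZ_2K	0x800UL

	int main(void)
	{
		/* stand-in for the address of __bp_harden_hyp_vecs_start */
		unsigned long vecs_start = 0xffff000010a00000UL;

		/* slot N is what kvm_get_hyp_vector() returns when
		 * data->hyp_vectors_slot == N and data->fn is set */
		for (int slot = 0; slot < 4; slot++)
			printf("slot %d: vectors at 0x%lx\n",
			       slot, vecs_start + slot * SZ_2K);
		return 0;
	}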

Patch

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index fa6f2174276b..eb46fc81a440 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2..2d6d4bd9de52 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+	if (data->fn) {
+		vect = __bp_harden_hyp_vecs_start +
+		       data->hyp_vectors_slot * SZ_2K;
+
+		if (!has_vhe())
+			vect = lm_alias(vect);
+	}
+
+	return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+				   PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f7c651f3a8c0..8d4f3c9d6dc4 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -52,7 +52,7 @@ static void __hyp_text __activate_traps_vhe(void)
 	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
 	write_sysreg(val, cpacr_el1);
 
-	write_sysreg(__kvm_hyp_vector, vbar_el1);
+	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
 
 static void __hyp_text __activate_traps_nvhe(void)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 6b60c98a6e22..1c9fdb6db124 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1158,7 +1158,7 @@ static void cpu_init_hyp_mode(void *dummy)
 	pgd_ptr = kvm_mmu_get_httbr();
 	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
-	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+	vector_ptr = (unsigned long)kvm_get_hyp_vector();
 
 	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 	__cpu_init_stage2();
@@ -1403,6 +1403,12 @@ static int init_hyp_mode(void)
 		goto out_err;
 	}
 
+	err = kvm_map_vectors();
+	if (err) {
+		kvm_err("Cannot map vectors\n");
+		goto out_err;
+	}
+
 	/*
 	 * Map the Hyp stack pages
 	 */