--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -285,6 +285,5 @@ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}
-
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1373,7 +1373,7 @@ static void cpu_prepare_hyp_mode(int cpu)
__flush_dcache_area(params, sizeof(*params));
}
-static void cpu_init_hyp_mode(void)
+static void kvm_set_hyp_vector(void)
{
struct kvm_nvhe_init_params *params;
struct arm_smccc_res res;
@@ -1391,6 +1391,11 @@ static void cpu_init_hyp_mode(void)
params = this_cpu_ptr_nvhe_sym(kvm_init_params);
arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
+}
+
+static void cpu_init_hyp_mode(void)
+{
+ kvm_set_hyp_vector();
/*
* Disabling SSBD on a non-VHE system requires us to enable SSBS
@@ -1433,7 +1438,10 @@ static void cpu_set_hyp_vector(void)
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
void *vector = hyp_spectre_vector_selector[data->slot];
- *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
+ if (!is_protected_kvm_enabled())
+ *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
+ else
+ kvm_call_hyp_nvhe(__hyp_cpu_set_vector, data->slot);
}
static void cpu_hyp_reinit(void)
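With protected KVM enabled, the host may no longer write the per-CPU vector pointer itself; it has to ask EL2 to switch slots via the __hyp_cpu_set_vector hypercall. The following is a minimal, self-contained C model of that dispatch — pkvm_enabled, hypercall_set_vector and the slot table are illustrative stand-ins, not kernel symbols:

/*
 * Illustrative model only: in the kernel, the "hypercall" is an HVC into
 * the nVHE hypervisor and the vector table lives at EL2.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_SLOTS 4

static bool pkvm_enabled;            /* models is_protected_kvm_enabled() */
static unsigned long hyp_vector;     /* models the per-CPU kvm_hyp_vector */
static const unsigned long vector_slots[NR_SLOTS] = {
	0x1000, 0x2000, 0x3000, 0x4000   /* stand-in vector addresses */
};

/* Models __hyp_cpu_set_vector: "EL2" validates the slot and updates its own state. */
static int hypercall_set_vector(int slot)
{
	if (slot < 0 || slot >= NR_SLOTS)
		return -1;
	hyp_vector = vector_slots[slot];
	return 0;
}

static void set_hyp_vector(int slot)
{
	if (!pkvm_enabled)
		hyp_vector = vector_slots[slot]; /* host writes the pointer directly */
	else
		hypercall_set_vector(slot);      /* host may only ask EL2 to do it */
}

int main(void)
{
	set_hyp_vector(1);          /* pre-protection: direct write */
	pkvm_enabled = true;
	set_hyp_vector(2);          /* post-protection: goes through "EL2" */
	printf("vector = %#lx\n", hyp_vector);
	return 0;
}

This is also why the next hunk moves cpu_set_hyp_vector() below cpu_init_hyp_mode(): the hypercall path can only work once EL2 has been initialized on the CPU.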
@@ -1441,13 +1449,14 @@ static void cpu_hyp_reinit(void)
kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
cpu_hyp_reset();
- cpu_set_hyp_vector();
if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
else
cpu_init_hyp_mode();
+ cpu_set_hyp_vector();
+
kvm_arm_init_debug();
if (vgic_present)
@@ -1653,6 +1662,36 @@ static int copy_cpu_ftr_regs(void)
return 0;
}
+static int kvm_hyp_enable_protection(void)
+{
+ void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
+ int ret, cpu;
+ void *addr;
+
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ if (!hyp_mem_base)
+ return -ENOMEM;
+
+ addr = phys_to_virt(hyp_mem_base);
+ ret = create_hyp_mappings(addr, addr + hyp_mem_size - 1, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ kvm_set_hyp_vector();
+ ret = kvm_call_hyp_nvhe(__kvm_hyp_protect, hyp_mem_base, hyp_mem_size,
+ num_possible_cpus(), kern_hyp_va(per_cpu_base));
+ if (ret)
+ return ret;
+
+ free_hyp_pgds();
+ for_each_possible_cpu(cpu)
+ free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+
+ return 0;
+}
+
/**
* Inits Hyp-mode on all online CPUs
*/
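To see the shape of the one-way transition kvm_hyp_enable_protection() performs — map the reserved memory, issue one final privileged call, then free the host-side structures so EL1 can no longer reach EL2's page tables — here is a compressed userspace model. All names are invented and the error handling is simplified; it is a sketch of the sequencing, not of the real machinery:

/* Toy model of the "initialize, donate, then drop access" sequence. */
#include <stdio.h>
#include <stdlib.h>

struct hyp_pool { void *base; size_t size; };

static struct hyp_pool pool;   /* models hyp_mem_base/hyp_mem_size */
static void *host_pgd;         /* models the host-owned hyp_pgtable */

/* Models __kvm_hyp_protect: after this succeeds, only "EL2" touches the pool. */
static int hyp_protect(void *base, size_t size)
{
	if (!base || !size)
		return -1;
	/* The real call rebuilds EL2's stage-1 tables inside the donated pool. */
	return 0;
}

static int enable_protection(void)
{
	/* Stands in for the bootmem reservation mapped via create_hyp_mappings(). */
	pool.base = malloc(1 << 20);
	pool.size = 1 << 20;
	if (!pool.base)
		return -1;

	if (hyp_protect(pool.base, pool.size))
		return -1;

	/* Point of no return: discard the host-side page tables. */
	free(host_pgd);
	host_pgd = NULL;
	return 0;
}

int main(void)
{
	host_pgd = malloc(4096);
	printf("protection %s\n", enable_protection() ? "failed" : "enabled");
	return 0;
}

Note the ordering mirrored from the patch: the temporary host-side mappings are created first, the hypercall is made, and only on success are the host's page tables and per-CPU stack pages freed.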
@@ -1789,6 +1828,12 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu)
cpu_prepare_hyp_mode(cpu);
+ err = kvm_hyp_enable_protection();
+ if (err) {
+ kvm_err("Failed to enable hyp memory protection: %d\n", err);
+ goto out_err;
+ }
+
return 0;
out_err:
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -225,15 +225,39 @@ void free_hyp_pgds(void)
if (hyp_pgtable) {
kvm_pgtable_hyp_destroy(hyp_pgtable);
kfree(hyp_pgtable);
+ hyp_pgtable = NULL;
}
mutex_unlock(&kvm_hyp_pgd_mutex);
}
+static bool kvm_host_owns_hyp_mappings(void)
+{
+ if (static_branch_likely(&kvm_protected_mode_initialized))
+ return false;
+
+ /*
+ * This can happen at boot time when __create_hyp_mappings() is called
+ * after the hyp protection has been enabled, but the static key has
+ * not been flipped yet.
+ */
+ if (!hyp_pgtable && is_protected_kvm_enabled())
+ return false;
+
+ BUG_ON(!hyp_pgtable);
+
+ return true;
+}
+
static int __create_hyp_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
int err;
+ if (!kvm_host_owns_hyp_mappings()) {
+ return kvm_call_hyp_nvhe(__hyp_create_mappings,
+ start, size, phys, prot);
+ }
+
mutex_lock(&kvm_hyp_pgd_mutex);
err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
mutex_unlock(&kvm_hyp_pgd_mutex);
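Every mapping request now funnels through the kvm_host_owns_hyp_mappings() test above. A standalone sketch of the three-way decision and of how callers route to the hypercall — all names here are stand-ins for the kernel symbols:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool protected_mode_initialized;  /* models the static key */
static bool protected_mode_requested;    /* models is_protected_kvm_enabled() */
static void *host_pgtable;               /* models hyp_pgtable; NULL once freed */

/* Mirrors the three cases in kvm_host_owns_hyp_mappings(). */
static bool host_owns_hyp_mappings(void)
{
	if (protected_mode_initialized)
		return false;
	/*
	 * Boot-time window: protection already enabled and the host tables
	 * freed, but the static key not yet flipped.
	 */
	if (!host_pgtable && protected_mode_requested)
		return false;
	assert(host_pgtable);    /* models the BUG_ON() */
	return true;
}

static int create_hyp_mapping(unsigned long start, size_t size)
{
	if (!host_owns_hyp_mappings()) {
		printf("hypercall: map %#lx (+%zu) at EL2\n", start, size);
		return 0;
	}
	printf("host maps %#lx (+%zu) directly\n", start, size);
	return 0;
}

int main(void)
{
	host_pgtable = (void *)1;
	create_hyp_mapping(0x1000, 4096);  /* early boot: host path */
	host_pgtable = NULL;
	protected_mode_requested = true;
	create_hyp_mapping(0x2000, 4096);  /* boot-time window: hypercall path */
	protected_mode_initialized = true;
	create_hyp_mapping(0x3000, 4096);  /* steady state: hypercall path */
	return 0;
}

The middle case is exactly the window the in-tree comment describes: free_hyp_pgds() has already cleared hyp_pgtable after the protection hypercall, but kvm_protected_mode_initialized has not been flipped yet.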
@@ -295,6 +319,16 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
unsigned long base;
int ret = 0;
+ if (!kvm_host_owns_hyp_mappings()) {
+ base = kvm_call_hyp_nvhe(__hyp_create_private_mapping,
+ phys_addr, size, prot);
+ if (!base)
+ return -ENOMEM;
+ *haddr = base;
+
+ return 0;
+ }
+
mutex_lock(&kvm_hyp_pgd_mutex);
/*
Previous commits have introduced infrastructure at EL2 to enable the Hyp code to manage its own memory, and more specifically its stage 1 page tables. However, this was preliminary work, and none of it is currently in use. Put all of this together by elevating the hyp mappings creation at EL2 when memory protection is enabled. In this case, the host kernel running at EL1 still creates _temporary_ Hyp mappings, only used while initializing the hypervisor, but frees them right after, and flips a static key marking the new 'protected' mode of operation. As such, all calls to create_hyp_mappings() after kvm init has finished turn into hypercalls, as the host now has no 'legal' way to modify the hypevisor page tables directly. Signed-off-by: Quentin Perret <qperret@google.com> --- arch/arm64/include/asm/kvm_mmu.h | 1 - arch/arm64/kvm/arm.c | 51 ++++++++++++++++++++++++++++++-- arch/arm64/kvm/mmu.c | 34 +++++++++++++++++++++ 3 files changed, 82 insertions(+), 4 deletions(-)