@@ -12,10 +12,8 @@ extern pgd_t *idmap_pgd;
void setup_mm_for_reboot(void);
#ifdef CONFIG_ARM_VIRT_EXT
-extern pgd_t *hyp_pgd;
-
-void hyp_idmap_teardown(void);
-void hyp_idmap_setup(void);
+void hyp_idmap_teardown(pgd_t *hyp_pgd);
+void hyp_idmap_setup(pgd_t *hyp_pgd);
#endif
#endif /* __ASM_IDMAP_H */
@@ -43,4 +43,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu,
struct kvm_run *run);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+unsigned long kvm_mmu_get_httbr(void);
+int kvm_mmu_init(void);
+void kvm_mmu_exit(void);
#endif /* __ARM_KVM_MMU_H__ */
@@ -35,7 +35,6 @@
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
-#include <asm/idmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
@@ -887,7 +886,7 @@ static void cpu_init_hyp_mode(void *vector)
/* Switch from the HYP stub to our own HYP init vector */
__hyp_set_vectors((unsigned long)vector);
- pgd_ptr = virt_to_phys(hyp_pgd);
+ pgd_ptr = kvm_mmu_get_httbr();
stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)__kvm_hyp_vector;
@@ -918,6 +917,13 @@ static int init_hyp_mode(void)
int err = 0;
/*
+	 * Allocate the Hyp PGD and set up the Hyp identity mapping
+ */
+ err = kvm_mmu_init();
+ if (err)
+ return err;
+
+ /*
* It is probably enough to obtain the default on one
* CPU. It's unlikely to be different on the others.
*/
@@ -954,7 +960,7 @@ static int init_hyp_mode(void)
/*
* Unmap the identity mapping
*/
- hyp_idmap_teardown();
+ kvm_mmu_exit();
/*
* Map the Hyp-code called directly from the host
@@ -34,6 +34,7 @@
#include "trace.h"
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
+static pgd_t *hyp_pgd;
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
int min, int max)
@@ -994,3 +995,39 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
+
+/**
+ * kvm_mmu_get_httbr - return the physical address of the Hyp PGD
+ *
+ * The returned value is suitable for programming into HTTBR when
+ * installing the Hyp translation tables on a CPU.
+ */
+unsigned long kvm_mmu_get_httbr(void)
+{
+	return virt_to_phys(hyp_pgd);
+}
+
+/**
+ * kvm_mmu_init - allocate the Hyp PGD and create the Hyp identity mapping
+ *
+ * Returns 0 on success, or -ENOMEM if the PGD allocation fails.
+ */
+int kvm_mmu_init(void)
+{
+	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!hyp_pgd)
+		return -ENOMEM;
+
+	hyp_idmap_setup(hyp_pgd);
+	return 0;
+}
+
+/**
+ * kvm_mmu_exit - tear down the Hyp identity mapping
+ *
+ * Note: hyp_pgd is intentionally not freed here; the caller removes
+ * only the identity mapping and keeps using the PGD for the regular
+ * Hyp mappings established afterwards.
+ */
+void kvm_mmu_exit(void)
+{
+	hyp_idmap_teardown(hyp_pgd);
+}
@@ -102,9 +102,6 @@ static int __init init_static_idmap(void)
early_initcall(init_static_idmap);
#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
-pgd_t *hyp_pgd;
-EXPORT_SYMBOL_GPL(hyp_pgd);
-
static void hyp_idmap_del_pmd(pgd_t *pgd, unsigned long addr)
{
pud_t *pud;
@@ -123,7 +120,7 @@ extern char __hyp_idmap_text_start[],
__hyp_idmap_text_end[];
* This version actually frees the underlying pmds for all pgds in range and
* clear the pgds themselves afterwards.
*/
-void hyp_idmap_teardown(void)
+void hyp_idmap_teardown(pgd_t *hyp_pgd)
{
unsigned long addr, end;