new file mode 100644
@@ -0,0 +1,103 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __KVM_ARM_H__
+#define __KVM_ARM_H__
+
+#include <asm/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE (1 << 27)
+#define HCR_TVM (1 << 26)
+#define HCR_TTLB (1 << 25)
+#define HCR_TPU (1 << 24)
+#define HCR_TPC (1 << 23)
+#define HCR_TSW (1 << 22)
+#define HCR_TAC (1 << 21)
+#define HCR_TIDCP (1 << 20)
+#define HCR_TSC (1 << 19)
+#define HCR_TID3 (1 << 18)
+#define HCR_TID2 (1 << 17)
+#define HCR_TID1 (1 << 16)
+#define HCR_TID0 (1 << 15)
+#define HCR_TWE (1 << 14)
+#define HCR_TWI (1 << 13)
+#define HCR_DC (1 << 12)
+#define HCR_BSU (3 << 10)
+#define HCR_FB (1 << 9)
+#define HCR_VA (1 << 8)
+#define HCR_VI (1 << 7)
+#define HCR_VF (1 << 6)
+#define HCR_AMO (1 << 5)
+#define HCR_IMO (1 << 4)
+#define HCR_FMO (1 << 3)
+#define HCR_PTW (1 << 2)
+#define HCR_SWIO (1 << 1)
+#define HCR_VM 1
+/* HCR_AMO appeared twice in the original mask; listed once here */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TWE | HCR_TWI | HCR_VM | HCR_AMO | \
+			HCR_IMO | HCR_FMO | HCR_SWIO)
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE (1 << 30)
+#define HSCTLR_EE (1 << 25)
+#define HSCTLR_FI (1 << 21)
+#define HSCTLR_WXN (1 << 19)
+#define HSCTLR_I (1 << 12)
+#define HSCTLR_C (1 << 2)
+#define HSCTLR_A (1 << 1)
+#define HSCTLR_M 1
+#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+ HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE	(1 << 31)
+#define TTBCR_IMP	(1 << 30)
+#define TTBCR_SH1	(3 << 28)
+#define TTBCR_ORGN1	(3 << 26)
+#define TTBCR_IRGN1	(3 << 24)
+#define TTBCR_EPD1	(1 << 23)
+#define TTBCR_A1	(1 << 22)
+#define TTBCR_T1SZ	(7 << 16)	/* T1SZ is a 3-bit field [18:16] */
+#define TTBCR_SH0	(3 << 12)
+#define TTBCR_ORGN0	(3 << 10)
+#define TTBCR_IRGN0	(3 << 8)
+#define TTBCR_EPD0	(1 << 7)
+#define TTBCR_T0SZ	7		/* T0SZ is a 3-bit field [2:0] */
+#define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0	(3 << 12)
+#define VTCR_ORGN0	(3 << 10)
+#define VTCR_IRGN0	(3 << 8)
+#define VTCR_SL0	(3 << 6)
+#define VTCR_S		(1 << 4)
+#define VTCR_T0SZ	0xf		/* T0SZ is a 4-bit field [3:0] */
+/*
+ * The original VTCR_MASK referenced itself; a self-referential macro is
+ * not re-expanded by the preprocessor, leaving an undefined identifier at
+ * every use site.
+ */
+#define VTCR_MASK	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
+			 VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2	0		/* Starting-level: 2 */
+#define VTCR_SL_L1	(1 << 6)	/* Starting-level: 1 */
+#define VTCR_GUEST_SL	VTCR_SL_L1
+#define VTCR_GUEST_T0SZ	0
+#if VTCR_GUEST_SL == 0
+#define VTTBR_X		(14 - VTCR_GUEST_T0SZ)
+#else
+#define VTTBR_X		(5 - VTCR_GUEST_T0SZ)
+#endif
+
+
+#endif /* __KVM_ARM_H__ */
@@ -24,5 +24,17 @@
#define ARM_EXCEPTION_DATA_ABORT 4
#define ARM_EXCEPTION_IRQ 5
#define ARM_EXCEPTION_FIQ 6
+#define ARM_EXCEPTION_HVC 7
+
+/*
+ * SMC Hypervisor API call numbers
+ */
+#ifdef __ASSEMBLY__
+#define SMC_HYP_CALL(n, x) .equ n, x
+#else /* !__ASSEMBLY__ */
+#define SMC_HYP_CALL(n, x) asm(".equ " #n ", " #x);
+#endif /* __ASSEMBLY__ */
+
+SMC_HYP_CALL(SMCHYP_HVBAR_W, 0xfffffff0)
#endif /* __ARM_KVM_ASM_H__ */
@@ -31,6 +31,7 @@ struct kvm_vcpu;
u32* kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
struct kvm_arch {
+ pgd_t *pgd; /* 1-level 2nd stage table */
};
#define EXCEPTION_NONE 0
new file mode 100644
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __ARM_KVM_MMU_H__
+#define __ARM_KVM_MMU_H__
+
+/*
+ * The architecture supports 40-bit IPA as input to the 2nd stage translations
+ * and PTRS_PER_PGD2 could therefore be 1024.
+ *
+ * To save a bit of memory and to avoid alignment issues we assume 39-bit IPA
+ * for now, but remember that the level-1 table must be aligned to its size.
+ */
+#define PTRS_PER_PGD2 512
+#define PGD2_ORDER get_order(PTRS_PER_PGD2 * sizeof(pgd_t))
+
+/* Hyp-mode level-1 translation table, allocated at KVM init time */
+extern pgd_t *kvm_hyp_pgd;
+
+/* Map the kernel VA range [start, end) into the Hyp level-1 table */
+int create_hyp_mappings(pgd_t *hyp_pgd,
+			unsigned long start,
+			unsigned long end);
+/* Clear Hyp mappings in [start, end) without freeing the page tables */
+void remove_hyp_mappings(pgd_t *hyp_pgd,
+			 unsigned long start,
+			 unsigned long end);
+/* Free all level-2 and level-3 tables reachable from hyp_pgd */
+void free_hyp_pmds(pgd_t *hyp_pgd);
+
+#endif /* __ARM_KVM_MMU_H__ */
@@ -31,6 +31,9 @@
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT	(61)
+/* was PGD_APTABLE_SHIFT, which is not defined anywhere */
+#define PMD_APTABLE		(_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
+#define PMD_PXNTABLE		(_AT(pgdval_t, 1) << 59)
/*
* - section
@@ -43,8 +46,10 @@
#ifdef __ASSEMBLY__
/* avoid 'shift count out of range' warning */
#define PMD_SECT_XN (0)
+#define PMD_SECT_PXN (0)
#else
#define PMD_SECT_XN ((pmdval_t)1 << 54)
+#define PMD_SECT_PXN ((pmdval_t)1 << 53)
#endif
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
@@ -87,6 +87,7 @@ extern pgprot_t pgprot_kernel;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
+#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_USER)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -217,6 +218,10 @@ static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
#ifdef CONFIG_ARM_LPAE
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
#define copy_pmd(pmdpd,pmdps) \
do { \
@@ -22,13 +22,31 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
+#include <asm/tlbflush.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
#include "trace.h"
+static void *kvm_arm_hyp_stack_page = NULL;
+extern unsigned long __kvm_hyp_init, __kvm_hyp_init_end;
+extern unsigned long __kvm_hyp_vector, __kvm_hyp_vector_end;
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+extern unsigned long __kvm_vcpu_run_end;
+
+/* The VMID used in the VTTBR */
+#define VMID_SIZE (1<<8)
+static DECLARE_BITMAP(kvm_vmids, VMID_SIZE);
+static DEFINE_MUTEX(kvm_vmids_mutex);
+
int kvm_arch_hardware_enable(void *garbage)
{
return 0;
@@ -36,6 +54,7 @@ int kvm_arch_hardware_enable(void *garbage)
void kvm_arch_hardware_disable(void *garbage)
{
+ /* There is no need for this now, so we just ignore that */
}
int kvm_arch_hardware_setup(void)
@@ -297,13 +316,157 @@ long kvm_arch_vm_ioctl(struct file *filp,
return -EINVAL;
}
+/*
+ * Bring the CPU's Hyp mode into a runnable state:
+ *  - allocate the Hyp level-1 page table and a stack page,
+ *  - identity-map the init code so it can run with the Hyp MMU enabled,
+ *  - point HVBAR at the init code (via an SMC to the secure monitor) and
+ *    run it with an HVC call,
+ *  - finally point HVBAR at the kernel-VA Hyp exception vector.
+ *
+ * Returns 0 on success, -ENOMEM if an allocation fails.
+ */
+static int init_hyp_mode(void)
+{
+	phys_addr_t init_phys_addr, init_end_phys_addr;
+	unsigned long vector_ptr, hyp_stack_ptr;
+	int err = 0;
+
+	/*
+	 * Allocate Hyp level-1 page table
+	 */
+	kvm_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!kvm_hyp_pgd)
+		return -ENOMEM;
+
+	/*
+	 * Allocate stack page for Hypervisor-mode
+	 */
+	kvm_arm_hyp_stack_page = (void *)__get_free_page(GFP_KERNEL);
+	if (!kvm_arm_hyp_stack_page) {
+		err = -ENOMEM;
+		goto out_free_pgd;
+	}
+
+	/* The stack grows down: hand Hyp mode the top of the page */
+	hyp_stack_ptr = (unsigned long)kvm_arm_hyp_stack_page + PAGE_SIZE;
+
+	init_phys_addr = virt_to_phys((void *)&__kvm_hyp_init);
+	init_end_phys_addr = virt_to_phys((void *)&__kvm_hyp_init_end);
+
+	/*
+	 * Create identity mapping (the init code runs from this physical
+	 * range with the Hyp MMU on)
+	 */
+	hyp_identity_mapping_add(kvm_hyp_pgd,
+				 (unsigned long)init_phys_addr,
+				 (unsigned long)init_end_phys_addr);
+
+	/*
+	 * Set the HVBAR (requires 32-byte alignment, hence the BUG_ON)
+	 */
+	BUG_ON(init_phys_addr & 0x1f);
+	/* NOTE(review): assumes the secure monitor implements the
+	 * SMCHYP_HVBAR_W call — confirm on the target platform. */
+	asm volatile (
+		"mov r0, %[vector_ptr]\n\t"
+		"ldr r7, =SMCHYP_HVBAR_W\n\t"
+		"smc #0\n\t" :
+		: [vector_ptr] "r" ((unsigned long)init_phys_addr)
+		: "r0", "r7");
+
+	/*
+	 * Call initialization code (HVC traps to __kvm_hyp_init, which
+	 * expects r0 = Hyp pgd phys, r1 = Hyp stack top)
+	 */
+	asm volatile (
+		"mov r0, %[pgd_ptr]\n\t"
+		"mov r1, %[stack_ptr]\n\t"
+		"hvc #0\n\t" :
+		: [pgd_ptr] "r" (virt_to_phys(kvm_hyp_pgd)),
+		[stack_ptr] "r" (hyp_stack_ptr)
+		: "r0", "r1");
+
+	/*
+	 * Unmap the identity mapping
+	 */
+	hyp_identity_mapping_del(kvm_hyp_pgd,
+				 (unsigned long)init_phys_addr,
+				 (unsigned long)init_end_phys_addr);
+
+	/*
+	 * Set the HVBAR to the virtual kernel address
+	 */
+	vector_ptr = (unsigned long)&__kvm_hyp_vector;
+	asm volatile (
+		"mov r0, %[vector_ptr]\n\t"
+		"ldr r7, =SMCHYP_HVBAR_W\n\t"
+		"smc #0\n\t" :
+		: [vector_ptr] "r" ((unsigned long)vector_ptr)
+		: "r0", "r7");
+
+	return err;
+out_free_pgd:
+	kfree(kvm_hyp_pgd);
+	kvm_hyp_pgd = NULL;
+	return err;
+}
+
+/*
+ * Map into Hyp mode everything it touches at run time: the exception
+ * vectors, the world-switch code and the Hyp stack page.
+ *
+ * Returns 0 on success or the create_hyp_mappings() error; on failure all
+ * Hyp page tables built so far are torn down.
+ */
+static int init_hyp_memory(void)
+{
+	int err = 0;
+	unsigned long start, end;
+
+	/*
+	 * Map Hyp exception vectors
+	 */
+	start = (unsigned long)&__kvm_hyp_vector;
+	end = (unsigned long)&__kvm_hyp_vector_end;
+	err = create_hyp_mappings(kvm_hyp_pgd, start, end);
+	if (err)
+		goto out_free_mappings;
+
+	/*
+	 * Map the world-switch code
+	 */
+	start = (unsigned long)&__kvm_vcpu_run;
+	end = (unsigned long)&__kvm_vcpu_run_end;
+	err = create_hyp_mappings(kvm_hyp_pgd, start, end);
+	if (err)
+		goto out_free_mappings;
+
+	/*
+	 * Map the Hyp stack page
+	 */
+	start = (unsigned long)kvm_arm_hyp_stack_page;
+	/* NOTE(review): inclusive end here vs. exclusive end above — the
+	 * page is still mapped either way, but consider start + PAGE_SIZE
+	 * for consistency with the other two ranges. */
+	end = start + PAGE_SIZE - 1;
+	err = create_hyp_mappings(kvm_hyp_pgd, start, end);
+	if (err)
+		goto out_free_mappings;
+
+	/* TODO: Is this necessary? */
+	flush_tlb_all();
+
+	/* TODO: Is this necessary? */
+	__asm__ volatile ("dsb\n\t"
+			  "isb\n\t");
+
+	return err;
+out_free_mappings:
+	free_hyp_pmds(kvm_hyp_pgd);
+	return err;
+}
+
int kvm_arch_init(void *opaque)
{
+	int err;
+
+	err = init_hyp_mode();
+	if (err)
+		goto out_err;
+
+	err = init_hyp_memory();
+	if (err)
+		goto out_err;
+
+	/* Reserve VMID 0 so it is never handed out to a guest */
+	set_bit(0, kvm_vmids);
	return 0;
+out_err:
+	/* NOTE(review): the Hyp pgd and stack page allocated by
+	 * init_hyp_mode() are not freed on this path — confirm whether
+	 * unwinding is safe once Hyp mode is live, or accept the leak. */
+	return err;
}
void kvm_arch_exit(void)
{
+	/* Tear down all Hyp mappings and free the Hyp level-1 table */
+	if (kvm_hyp_pgd) {
+		free_hyp_pmds(kvm_hyp_pgd);
+		kfree(kvm_hyp_pgd);
+		kvm_hyp_pgd = NULL;
+	}
+	/* NOTE(review): kvm_arm_hyp_stack_page is never freed — confirm
+	 * whether that is intentional. */
}
static int k_show(struct seq_file *m, void *v)
@@ -13,5 +13,129 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
+
+#include <linux/linkage.h>
+#include <asm/unified.h>
+#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ Hypervisor initialization
+@ - should be called with:
+@ r0 = Hypervisor pgd pointer
+@ r1 = top of Hyp stack (kernel VA)
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ .text
+ .align 12
+__kvm_hyp_init:
+ .globl __kvm_hyp_init
+
+ @ Hyp-mode exception vector
+ nop
+ nop
+ nop
+ nop
+ nop
+ b __do_hyp_init
+ nop
+ nop
+
+__do_hyp_init:
+	@ Set the sp to end of this page and push data for later use
+	@ (pc lies inside this 4KB-aligned page, so clearing the low 12
+	@ bits and adding 0x1000 yields the end of the page)
+	mov sp, pc
+	bic sp, sp, #0x0ff
+	bic sp, sp, #0xf00
+	add sp, sp, #0x1000
+	push {r1, r2, r12}
+
+	@ Set the HTTBR to be the same as the TTBR1 holding the kernel
+	@ level-1 page table
+	@ NOTE(review): the code writes r0 (the Hyp pgd from the caller) as
+	@ the HTTBR low word while r2 carries TTBR1's high word — comment
+	@ and code disagree; the commented-out mov suggests the high word
+	@ was meant to be zeroed. Confirm intended HTTBR value.
+	mrrc p15, 1, r1, r2, c2
+	@mov r1, #0
+	mcrr p15, 4, r0, r2, c2
+
+	@ Set the HTCR and VTCR to the same shareability and cacheability
+	@ settings as the non-secure TTBCR and with T0SZ == 0.
+	mrc p15, 4, r0, c2, c0, 2 @ HTCR
+	ldr r12, =HTCR_MASK
+	bic r0, r0, r12
+	mrc p15, 0, r1, c2, c0, 2 @ TTBCR
+	and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
+	orr r0, r0, r1
+	mcr p15, 4, r0, c2, c0, 2 @ HTCR
+
+	@ r0 still holds the HTCR value: keep only its SH/ORGN/IRGN bits
+	@ and merge them into VTCR, forcing level-1 start and T0SZ
+	mrc p15, 4, r1, c2, c1, 2 @ VTCR
+	bic r1, r1, #(VTCR_HTCR_SH | VTCR_SL0)
+	bic r0, r0, #(~VTCR_HTCR_SH)
+	orr r1, r0, r1
+	orr r1, r1, #(VTCR_SL_L1 | VTCR_GUEST_T0SZ)
+	mcr p15, 4, r1, c2, c1, 2 @ VTCR
+
+	@ Use the same memory attributes for hyp. accesses as the kernel
+	@ (copy MAIRx to HMAIRx).
+	mrc p15, 0, r0, c10, c2, 0
+	mcr p15, 4, r0, c10, c2, 0
+	mrc p15, 0, r0, c10, c2, 1
+	mcr p15, 4, r0, c10, c2, 1
+
+	@ Set the HSCTLR to:
+	@ - ARM/THUMB exceptions: Kernel config
+	@ - Endianness: Kernel config
+	@ - Fast Interrupt Features: Kernel config
+	@ - Write permission implies XN: disabled
+	@ - Instruction cache: enabled
+	@ - Data/Unified cache: enabled
+	@ - Memory alignment checks: enabled
+	@ - MMU: enabled (this code must be run from an identity mapping)
+	mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
+	ldr r12, =HSCTLR_MASK
+	bic r0, r0, r12
+	mrc p15, 0, r1, c1, c0, 0 @ SCTLR
+	ldr r12, =(HSCTLR_TE | HSCTLR_EE | HSCTLR_FI)
+	and r1, r1, r12
+	ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_I)
+	orr r1, r1, r12
+	orr r0, r0, r1
+	isb
+	mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
+	isb
+
+	@ Set stack pointer and return to the kernel
+	pop {r1, r2, r12}
+	mov sp, r1
+	eret
+
+ .ltorg
+
+ .align 12
+
+ __kvm_init_sp:
+ .globl __kvm_hyp_init_end
+__kvm_hyp_init_end:
+
+
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ Hypervisor world-switch code
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+/*
+ * This is a stub
+ */
+ENTRY(__kvm_vcpu_run)
+ mov pc, lr
+__kvm_vcpu_run_end:
+ .globl __kvm_vcpu_run_end
+
+
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ Hypervisor exception vector and handlers
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+ .align 5
+__kvm_hyp_vector:
+ .globl __kvm_hyp_vector
+ nop
+__kvm_hyp_vector_end:
+ .globl __kvm_hyp_vector_end
@@ -0,0 +1,255 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/mman.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/pgalloc.h>
+
+#include "../mm/mm.h"
+#include "trace.h"
+
+extern struct mm_struct init_mm;
+
+pgd_t *kvm_hyp_pgd = NULL;
+
+/*
+ * Free the level-3 (pte) tables pointed to by the entries of a Hyp-mode
+ * level-2 (pmd) table.  @addr is the VA covered by the first entry and
+ * advances by PMD_SIZE per entry.
+ */
+static void free_hyp_ptes(pmd_t *hyp_pmd, unsigned long addr)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
+		pmd = hyp_pmd + i;
+		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
+			/* Walk from the current entry, not the table base,
+			 * to locate the pte table that this entry points to */
+			pte = pte_offset_kernel(pmd, addr);
+			pte_free_kernel(NULL, pte);
+		}
+	}
+}
+
+/*
+ * Free the Hyp-mode level-2 tables and their child level-3 tables.  The
+ * level-1 table itself is not freed; the caller owns it.
+ */
+void free_hyp_pmds(pgd_t *hyp_pgd)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	unsigned long addr, next, end;
+
+	/* Hyp mappings mirror kernel VAs: walk PAGE_OFFSET .. ~0 */
+	addr = PAGE_OFFSET;
+	end = ~0;
+	do {
+		next = pgd_addr_end(addr, (~0));
+		pgd = hyp_pgd + pgd_index(addr);
+
+		BUG_ON(pgd_bad(*pgd));
+
+		/* continue re-evaluates the while clause, advancing addr */
+		if (pgd_none(*pgd))
+			continue;
+
+		pmd = pmd_offset(pgd, addr);
+		free_hyp_ptes(pmd, addr);
+		pmd_free(NULL, pmd);
+	} while (addr = next, addr != end);
+}
+
+/* Clear every pte in [addr, end) within the given level-2 table.
+ * Callers guarantee addr < end. */
+static void remove_hyp_pte_mappings(pmd_t *pmd, unsigned long addr,
+				    unsigned long end)
+{
+	pte_t *pte;
+
+	do {
+		pte = pte_offset_kernel(pmd, addr);
+		pte_clear(NULL, addr, pte);
+	} while (addr += PAGE_SIZE, addr < end);
+}
+
+/*
+ * Walk the level-2 tables under one level-1 entry and clear the level-3
+ * mappings in [addr, end).  Hyp mappings never use section entries, hence
+ * the BUG_ON.
+ */
+static void remove_hyp_pmd_mappings(pgd_t *pgd, unsigned long addr,
+				    unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	do {
+		next = pmd_addr_end(addr, end);
+		pmd = pmd_offset(pgd, addr);
+
+		BUG_ON(pmd_sect(*pmd));
+
+		if (!pmd_none(*pmd))
+			remove_hyp_pte_mappings(pmd, addr, next);
+	} while (addr = next, addr < end);
+}
+
+/*
+ * Clear hypervisor mappings in the specified range (does not actually
+ * free the page tables).
+ */
+void remove_hyp_mappings(pgd_t *hyp_pgd, unsigned long start,
+			unsigned long end)
+{
+	pgd_t *pgd;
+	unsigned long addr, next;
+
+	/* Hyp mappings mirror kernel VAs, so the range must be above
+	 * PAGE_OFFSET */
+	BUG_ON(start > end);
+	BUG_ON(start < PAGE_OFFSET);
+
+	addr = start;
+	do {
+		next = pgd_addr_end(addr, end);
+		pgd = hyp_pgd + pgd_index(addr);
+
+		BUG_ON(pgd_bad(*pgd));
+
+		/* continue re-evaluates the while clause, advancing addr */
+		if (pgd_none(*pgd))
+			continue;
+
+		remove_hyp_pmd_mappings(pgd, addr, next);
+	} while (addr = next, addr < end);
+}
+
+extern unsigned long __kvm_hyp_vector, __kvm_hyp_vector_end;
+
+/* Populate level-3 entries so each kernel page in [addr, end) is mapped
+ * at the same VA in Hyp mode, with PAGE_HYP protections. */
+static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long addr,
+				    unsigned long end)
+{
+	pte_t *pte;
+	struct page *page;
+
+	addr &= PAGE_MASK;
+	do {
+		pte = pte_offset_kernel(pmd, addr);
+		/* Only lowmem (lineary-mapped) addresses can be mirrored */
+		BUG_ON(!virt_addr_valid(addr));
+		page = virt_to_page(addr);
+
+		set_pte_ext(pte, mk_pte(page, PAGE_HYP), 0);
+	} while (addr += PAGE_SIZE, addr < end);
+}
+
+/*
+ * Allocate (if needed) the level-3 tables under one level-1 entry and
+ * populate the mappings for [addr, end).
+ *
+ * Returns 0 on success, -ENOMEM if a level-3 table cannot be allocated.
+ */
+static int create_hyp_pmd_mappings(pgd_t *pgd, unsigned long addr,
+				   unsigned long end)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long next;
+
+	do {
+		next = pmd_addr_end(addr, end);
+		pmd = pmd_offset(pgd, addr);
+
+		BUG_ON(pmd_sect(*pmd));
+
+		if (pmd_none(*pmd)) {
+			pte = pte_alloc_one_kernel(NULL, addr);
+			if (!pte) {
+				kvm_err(-ENOMEM, "Cannot allocate Hyp pte");
+				return -ENOMEM;
+			}
+			pmd_populate_kernel(NULL, pmd, pte);
+		}
+
+		create_hyp_pte_mappings(pmd, addr, next);
+	} while (addr = next, addr < end);
+
+	return 0;
+}
+
+/*
+ * Map the requested kernel virtual address range to the corresponding
+ * physical addresses in the hyp table.
+ *
+ * @hyp_pgd: The allocated hypervisor level-1 table
+ * @start: The virtual kernel start address of the range
+ * @end: The virtual kernel end address of the range
+ *
+ * Returns 0 on success, -EINVAL for a non-kernel VA, -ENOMEM on table
+ * allocation failure.
+ */
+int create_hyp_mappings(pgd_t *hyp_pgd, unsigned long start, unsigned long end)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	unsigned long addr, next;
+	int err = 0;
+
+	BUG_ON(start > end);
+	/* Only kernel VAs can be mirrored into Hyp mode */
+	if (start < PAGE_OFFSET)
+		return -EINVAL;
+
+	addr = start;
+	do {
+		next = pgd_addr_end(addr, end);
+		pgd = hyp_pgd + pgd_index(addr);
+
+		if (pgd_none_or_clear_bad(pgd)) {
+			pmd = pmd_alloc_one(NULL, addr);
+			if (!pmd) {
+				kvm_err(-ENOMEM, "Cannot allocate Hyp pmd");
+				/* NOTE(review): mappings built so far are
+				 * left in place; callers unwind with
+				 * free_hyp_pmds() */
+				return -ENOMEM;
+			}
+			pgd_populate(NULL, pgd, pmd);
+		}
+
+		err = create_hyp_pmd_mappings(pgd, addr, next);
+		if (err)
+			return err;
+	} while (addr = next, addr < end);
+
+	return err;
+}
+
+/*
+ * Allocate the level-1 translation table for stage-2 translation.
+ *
+ * Returns 0 on success, -EINVAL if the VM already has a table, -ENOMEM if
+ * the page allocation fails.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	pgd_t *pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err(-EINVAL, "kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	/* __GFP_ZERO zeroes the whole allocation, replacing the previous
+	 * explicit memset (which only covered part of the 2^order pages) */
+	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD2_ORDER);
+	if (!pgd)
+		return -ENOMEM;
+
+	kvm->arch.pgd = pgd;
+
+	return 0;
+}
+
+/*
+ * Free the level-1 translation table for stage-2 translation and all
+ * belonging level-2 and level-3 tables.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+	if (kvm->arch.pgd == NULL)
+		return;
+
+	free_pages((unsigned long)kvm->arch.pgd, PGD2_ORDER);
+	kvm->arch.pgd = NULL;
+
+	/* TODO: Free child tables */
+	/* NOTE(review): child tables must be walked *before* the level-1
+	 * table is freed above — reorder once this is implemented. */
+	KVMARM_NOT_IMPLEMENTED();
+}
+
+/* Stage-2 fault handler — not implemented yet; always returns -EINVAL. */
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}