@@ -99,5 +99,29 @@
#define VTTBR_X (5 - VTCR_GUEST_T0SZ)
#endif
+/* Hyp Syndrome Register (HSR) bits */
+#define HSR_EC_SHIFT (26)
+#define HSR_EC (0x3fU << HSR_EC_SHIFT)
+#define HSR_IL (1U << 25)
+#define HSR_ISS (HSR_IL - 1)
+
+#define HSR_EC_UNKNOWN (0x00)
+#define HSR_EC_WFI (0x01)
+#define HSR_EC_CP15_32 (0x03)
+#define HSR_EC_CP15_64 (0x04)
+#define HSR_EC_CP14_MR (0x05)
+#define HSR_EC_CP14_LS (0x06)
+#define HSR_EC_CP_0_13 (0x07)
+#define HSR_EC_CP10_ID (0x08)
+#define HSR_EC_JAZELLE (0x09)
+#define HSR_EC_BXJ (0x0A)
+#define HSR_EC_CP14_64 (0x0C)
+#define HSR_EC_SVC_HYP (0x11)
+#define HSR_EC_HVC (0x12)
+#define HSR_EC_SMC (0x13)
+#define HSR_EC_IABT (0x20)
+#define HSR_EC_IABT_HYP (0x21)
+#define HSR_EC_DABT (0x24)
+#define HSR_EC_DABT_HYP (0x25)
#endif /* __KVM_ARM_H__ */
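For reference, the EC values above index the exception class reported in
HSR[31:26]. A minimal sketch of how the masks defined here compose (the
helpers are illustrative, not part of this patch):

    static inline u32 hsr_ec(u32 hsr)
    {
            return (hsr & HSR_EC) >> HSR_EC_SHIFT;  /* bits [31:26] */
    }

    static inline u32 hsr_iss(u32 hsr)
    {
            return hsr & HSR_ISS;                   /* bits [24:0] */
    }

The hyp_hvc handler below extracts the same EC field in assembly with
"lsr r1, r0, #HSR_EC_SHIFT".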
@@ -73,6 +73,8 @@ struct kvm_vcpu_arch {
u64 c2_TTBR1; /* Translation Table Base Register 1 */
u32 c2_TTBCR; /* Translation Table Base Control Register */
u32 c3_DACR; /* Domain Access Control Register */
+ u32 c10_PRRR; /* Primary Region Remap Register */
+ u32 c10_NMRR; /* Normal Memory Remap Register */
} cp15;
u32 exception_pending; /* Exception to raise after emulation */
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/kvm_host.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -118,5 +119,27 @@ int main(void)
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+#ifdef CONFIG_KVM
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_SCTLR, offsetof(struct kvm_vcpu, arch.cp15.c1_SCTLR));
+ DEFINE(VCPU_CPACR, offsetof(struct kvm_vcpu, arch.cp15.c1_CPACR));
+ DEFINE(VCPU_TTBR0, offsetof(struct kvm_vcpu, arch.cp15.c2_TTBR0));
+ DEFINE(VCPU_TTBR1, offsetof(struct kvm_vcpu, arch.cp15.c2_TTBR1));
+ DEFINE(VCPU_TTBCR, offsetof(struct kvm_vcpu, arch.cp15.c2_TTBCR));
+ DEFINE(VCPU_DACR, offsetof(struct kvm_vcpu, arch.cp15.c3_DACR));
+ DEFINE(VCPU_PRRR, offsetof(struct kvm_vcpu, arch.cp15.c10_PRRR));
+ DEFINE(VCPU_NMRR, offsetof(struct kvm_vcpu, arch.cp15.c10_NMRR));
+ DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
+ DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
+ DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
+ DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
+ DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
+ DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
+ DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.pc));
+ DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.cpsr));
+ DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
+ DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+#endif
return 0;
}
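The DEFINE() entries above are what let the world-switch assembly refer to
these struct offsets symbolically: Kbuild compiles asm-offsets.c and
post-processes the assembler output into include/generated/asm-offsets.h,
yielding plain constants of roughly this shape (the numeric offset is
illustrative, not the real value):

    #define VCPU_PC 140 /* offsetof(struct kvm_vcpu, arch.regs.pc) */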
@@ -197,6 +197,7 @@ __dabt_svc:
ENDPROC(__dabt_svc)
.align 5
+ .globl __irq_svc
__irq_svc:
svc_entry
@@ -245,7 +245,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
- KVMARM_NOT_IMPLEMENTED();
+ unsigned long cpsr;
+ unsigned long sctlr;
+
+ /* Init execution CPSR */
+ asm volatile ("mrs %[cpsr], cpsr": [cpsr] "=r" (cpsr));
+ vcpu->arch.regs.cpsr = SVC_MODE | PSR_I_BIT | PSR_F_BIT | PSR_A_BIT |
+ (cpsr & PSR_E_BIT);
+
+ /* Init SCTLR with MMU disabled */
+ asm volatile ("mrc p15, 0, %[sctlr], c1, c0, 0":
+ [sctlr] "=r" (sctlr));
+ vcpu->arch.cp15.c1_SCTLR = sctlr & ~1U;
+
return 0;
}
@@ -287,6 +299,12 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __kvm_vcpu_run(vcpu);
+ local_irq_restore(flags);
+
KVMARM_NOT_IMPLEMENTED();
return -EINVAL;
}
@@ -21,6 +21,12 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
+#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + ((_reg_nr) * 4))
+#define VCPU_USR_SP (VCPU_USR_REG(13))
+#define VCPU_FIQ_REG(_reg_nr) (VCPU_FIQ_REGS + ((_reg_nr) * 4))
+#define VCPU_FIQ_SPSR (VCPU_FIQ_REG(7))
+
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ Hypervisor initialization
@ - should be called with:
@@ -120,11 +126,269 @@ __kvm_hyp_init_end:
@ Hypervisor world-switch code
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
-/*
- * This is a stub
+.macro store_mode_state base_reg, mode
+ .if \mode == usr
+ mrs r2, SP_usr
+ mov r3, lr
+ stmdb \base_reg!, {r2, r3}
+ .elseif \mode != fiq
+ mrs r2, SP_\mode
+ mrs r3, LR_\mode
+ mrs r4, SPSR_\mode
+ stmdb \base_reg!, {r2, r3, r4}
+ .else
+ mrs r2, r8_fiq
+ mrs r3, r9_fiq
+ mrs r4, r10_fiq
+ mrs r5, r11_fiq
+ mrs r6, r12_fiq
+ mrs r7, SP_fiq
+ mrs r8, LR_fiq
+ mrs r9, SPSR_fiq
+ stmdb \base_reg!, {r2-r9}
+ .endif
+.endm
+
+.macro load_mode_state base_reg, mode
+ .if \mode == usr
+ ldmia \base_reg!, {r2, r3}
+ msr SP_usr, r2
+ mov lr, r3
+ .elseif \mode != fiq
+ ldmia \base_reg!, {r2, r3, r4}
+ msr SP_\mode, r2
+ msr LR_\mode, r3
+ msr SPSR_\mode, r4
+ .else
+ ldmia \base_reg!, {r2-r9}
+ msr r8_fiq, r2
+ msr r9_fiq, r3
+ msr r10_fiq, r4
+ msr r11_fiq, r5
+ msr r12_fiq, r6
+ msr SP_fiq, r7
+ msr LR_fiq, r8
+ msr SPSR_fiq, r9
+ .endif
+.endm
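+
+@ Note on layout: the offsets generated by asm-offsets.c are assumed to
+@ describe a contiguous register file, roughly (a sketch; only ordering
+@ and sizes matter, field names come from asm-offsets.c):
+@
+@	struct {
+@		u32 usr_regs[15];	/* r0-r12, SP_usr, LR_usr */
+@		u32 svc_regs[3];	/* SP, LR, SPSR */
+@		u32 abt_regs[3];	/* SP, LR, SPSR */
+@		u32 und_regs[3];	/* SP, LR, SPSR */
+@		u32 irq_regs[3];	/* SP, LR, SPSR */
+@		u32 fiq_regs[8];	/* r8-r12, SP, LR, SPSR */
+@		u32 pc, cpsr;
+@	};
+@
+@ This ordering is what makes the ldmia/stmdb walks in the macros above
+@ and the "sub r0, r0, #(VCPU_PC - VCPU_USR_REGS)" adjustment below
+@ line up.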
+
+/* Reads cp15 registers from hardware and stores them in memory
+ * @vcpu: If 0, registers are written in-order to the stack,
+ * otherwise to the VCPU struct pointed to by vcpup
+ * @vcpup: Register pointing to VCPU struct
+ */
+.macro read_cp15_state vcpu=0, vcpup
+ mrc p15, 0, r2, c1, c0, 0 @ SCTLR
+ mrc p15, 0, r3, c1, c0, 2 @ CPACR
+ mrc p15, 0, r4, c2, c0, 2 @ TTBCR
+ mrc p15, 0, r5, c3, c0, 0 @ DACR
+ mrrc p15, 0, r6, r7, c2 @ TTBR 0
+ mrrc p15, 1, r8, r9, c2 @ TTBR 1
+ mrc p15, 0, r10, c10, c2, 0 @ PRRR
+ mrc p15, 0, r11, c10, c2, 1 @ NMRR
+
+ .if \vcpu == 0
+ push {r2-r11} @ Push CP15 registers
+ .else
+ str r2, [\vcpup, #VCPU_SCTLR]
+ str r3, [\vcpup, #VCPU_CPACR]
+ str r4, [\vcpup, #VCPU_TTBCR]
+ str r5, [\vcpup, #VCPU_DACR]
+ add \vcpup, \vcpup, #VCPU_TTBR0
+ strd r6, r7, [\vcpup]
+ add \vcpup, \vcpup, #(VCPU_TTBR1 - VCPU_TTBR0)
+ strd r8, r9, [\vcpup]
+ sub \vcpup, \vcpup, #(VCPU_TTBR1)
+ str r10, [\vcpup, #VCPU_PRRR]
+ str r11, [\vcpup, #VCPU_NMRR]
+ .endif
+.endm
+
+/* Reads cp15 registers from memory and writes them to hardware
+ * @vcpu: If 0, registers are read in-order from the stack,
+ * otherwise from the VCPU struct pointed to by vcpup
+ * @vcpup: Register pointing to VCPU struct
*/
+.macro write_cp15_state vcpu=0, vcpup
+ .if \vcpu == 0
+ pop {r2-r11}
+ .else
+ ldr r2, [\vcpup, #VCPU_SCTLR]
+ ldr r3, [\vcpup, #VCPU_CPACR]
+ ldr r4, [\vcpup, #VCPU_TTBCR]
+ ldr r5, [\vcpup, #VCPU_DACR]
+ add \vcpup, \vcpup, #VCPU_TTBR0
+ ldrd r6, r7, [\vcpup]
+ add \vcpup, \vcpup, #(VCPU_TTBR1 - VCPU_TTBR0)
+ ldrd r8, r9, [\vcpup]
+ sub \vcpup, \vcpup, #(VCPU_TTBR1)
+ ldr r10, [\vcpup, #VCPU_PRRR]
+ ldr r11, [\vcpup, #VCPU_NMRR]
+ .endif
+
+ mcr p15, 0, r2, c1, c0, 0 @ SCTLR
+ mcr p15, 0, r3, c1, c0, 2 @ CPACR
+ mcr p15, 0, r4, c2, c0, 2 @ TTBCR
+ mcr p15, 0, r5, c3, c0, 0 @ DACR
+ mcrr p15, 0, r6, r7, c2 @ TTBR 0
+ mcrr p15, 1, r8, r9, c2 @ TTBR 1
+ mcr p15, 0, r10, c10, c2, 0 @ PRRR
+ mcr p15, 0, r11, c10, c2, 1 @ NMRR
+.endm
+
+/* Configures the HSTR (Hyp System Trap Register) on entry/return
+ * (hardware reset value is 0) */
+.macro set_hstr entry
+ mrc p15, 4, r2, c1, c1, 3
+ ldr r3, =0xbe00
+ .if \entry == 1
+ orr r2, r2, r3 @ Trap CR{9,10,11,12,13,15}
+ .else
+ bic r2, r2, r3 @ Don't trap any CRx accesses
+ .endif
+ mcr p15, 4, r2, c1, c1, 3
+.endm
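+
+@ Sanity check on the mask: 0xbe00 == (1 << 9) | (1 << 10) | (1 << 11) |
+@ (1 << 12) | (1 << 13) | (1 << 15), i.e. one HSTR.T<n> trap bit per
+@ coprocessor register listed above.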
+
+/* Enable/Disable: stage-2 translation, trap interrupts, trap wfi/wfe, trap smc */
+.macro configure_hyp_role entry
+ mrc p15, 4, r2, c1, c1, 0 @ HCR
+ ldr r3, =HCR_GUEST_MASK
+ .if \entry == 1
+ orr r2, r2, r3
+ .else
+ bic r2, r2, r3
+ .endif
+ mcr p15, 4, r2, c1, c1, 0
+.endm
+
+@ Called from the host kernel in SVC mode; the first HVC below switches
+@ to Hyp mode.
+@ Arguments:
+@ r0: pointer to vcpu struct
ENTRY(__kvm_vcpu_run)
+ hvc #0 @ Change to Hyp-mode
+
+ @ Now we're in Hyp-mode and lr_usr, spsr_hyp are on the stack
+ mrs r2, sp_usr
+ push {r2} @ Push r13_usr
+ push {r4-r12} @ Push r4-r12
+
+ store_mode_state sp, svc
+ store_mode_state sp, abt
+ store_mode_state sp, und
+ store_mode_state sp, irq
+ store_mode_state sp, fiq
+
+ @ Store hardware CP15 state and load guest state
+ read_cp15_state
+ write_cp15_state 1, r0
+
+ push {r0} @ Push the VCPU pointer
+
+ @ Set up guest memory translation
+ ldr r1, [r0, #VCPU_KVM] @ r1 points to kvm struct
+ ldrd r2, r3, [r1, #KVM_VTTBR]
+ mcrr p15, 6, r2, r3, c2 @ Write VTTBR
+
+ @ Configure Hyp-role
+ configure_hyp_role 1
+
+ @ Trap coprocessor accesses to CR{9,10,11,12,13,15} (see set_hstr)
+ set_hstr 1
+
+ @ Load guest registers
+ add r0, r0, #(VCPU_USR_SP)
+ load_mode_state r0, usr
+ load_mode_state r0, svc
+ load_mode_state r0, abt
+ load_mode_state r0, und
+ load_mode_state r0, irq
+ load_mode_state r0, fiq
+
+ @ Load return state (r0 now points to vcpu->arch.regs.pc)
+ ldmia r0, {r2, r3}
+ msr ELR_hyp, r2
+ msr SPSR_cxsf, r3
+
+ @ Load remaining registers and do the switch
+ sub r0, r0, #(VCPU_PC - VCPU_USR_REGS)
+ ldmia r0, {r0-r12}
+ eret
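+
+ @ The guest now runs until it traps back into Hyp mode; the vector
+ @ handlers below then branch to __kvm_vcpu_return with the exception
+ @ code in r0 and the VCPU pointer in r1.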
+
+__kvm_vcpu_return:
+ @ Store return state
+ mrs r2, ELR_hyp
+ mrs r3, spsr
+ str r2, [r1, #VCPU_PC]
+ str r3, [r1, #VCPU_CPSR]
+
+ @ Store guest registers
+ add r1, r1, #(VCPU_FIQ_SPSR + 4)
+ store_mode_state r1, fiq
+ store_mode_state r1, irq
+ store_mode_state r1, und
+ store_mode_state r1, abt
+ store_mode_state r1, svc
+ store_mode_state r1, usr
+
+ @ Don't trap coprocessor accesses for host kernel
+ set_hstr 0
+
+ @ Reset Hyp-role
+ configure_hyp_role 0
+
+ @ Set VMID == 0
+ mov r2, #0
+ mov r3, #0
+ mcrr p15, 6, r2, r3, c2 @ Write VTTBR
+
+ @ Store guest CP15 state and restore host state
+ read_cp15_state 1, r1
+ write_cp15_state
+
+ load_mode_state sp, fiq
+ load_mode_state sp, irq
+ load_mode_state sp, und
+ load_mode_state sp, abt
+ load_mode_state sp, svc
+
+ pop {r4-r12} @ Pop r4-r12
+ pop {r2} @ Pop r13_usr
+ msr sp_usr, r2
+
+ hvc #0 @ Switch back to host (SVC) mode via hyp_svc
+
+ cmp r0, #ARM_EXCEPTION_IRQ
+ bne return_to_ioctl
+
+ /*
+ * For IRQ exceptions we hand over to the kernel IRQ handler. This
+ * requires some manipulation though.
+ *
+ * - The easiest entry point to the host handler is __irq_svc.
+ * - __irq_svc expects to be entered from SVC mode, which the
+ * vector_stub code in entry-armv.S normally switches to. It invokes
+ * svc_entry, which uses values stored in memory, pointed to by r0,
+ * to return from the handler. We allocate that memory on the stack,
+ * laid out as follows (see the sketch after this routine):
+ * 0x8: cpsr
+ * 0x4: return_address
+ * 0x0: r0
+ */
+ adr r1, irq_kernel_resume @ Where to resume
+ mrs r2, cpsr @ CPSR when we return
+ push {r0 - r2}
+ mov r0, sp
+ b __irq_svc
+
+irq_kernel_resume:
+ pop {r0}
+ add sp, sp, #8
+
+return_to_ioctl:
mov pc, lr
+
+ .ltorg
+
__kvm_vcpu_run_end:
.globl __kvm_vcpu_run_end
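Since __kvm_vcpu_run ends with "mov pc, lr" and the ARM_EXCEPTION_* code
in r0, a C caller sees that code as the return value per the AAPCS. A
sketch of an eventual call site (the exit handling itself is not part of
this patch):

    int exit_reason = __kvm_vcpu_run(vcpu); /* ARM_EXCEPTION_HVC or _IRQ */

The three words pushed before branching to __irq_svc form the frame that
svc_entry consumes, viewed as a C struct (name illustrative):

    struct irq_entry_frame {
            u32 r0;       /* sp + 0x0 */
            u32 ret_addr; /* sp + 0x4, here irq_kernel_resume */
            u32 cpsr;     /* sp + 0x8 */
    };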
@@ -136,6 +400,109 @@ __kvm_vcpu_run_end:
.align 5
__kvm_hyp_vector:
.globl __kvm_hyp_vector
- nop
+
+ @ Hyp-mode exception vector
+ b hyp_reset
+ b hyp_undef
+ b hyp_svc
+ b hyp_pabt
+ b hyp_dabt
+ b hyp_hvc
+ b hyp_irq
+ b hyp_fiq
+
+ .align
+hyp_reset:
+ sub pc, pc, #8 @ loop forever (pc reads as current insn + 8)
+
+ .align
+hyp_undef:
+ sub pc, pc, #8
+
+ .align
+hyp_svc:
+ @ Can only get here if HVC or SVC is called from Hyp mode, which means
+ @ we want to change mode back to SVC mode.
+ @ NB: Stack pointer should be where hyp_hvc handler left it!
+ ldr lr, [sp, #4]
+ msr SPSR_cxsf, lr
+ ldr lr, [sp]
+ add sp, sp, #8
+ eret
+
+ .align
+hyp_pabt:
+ sub pc, pc, #8
+
+ .align
+hyp_dabt:
+ sub pc, pc, #8
+
+ .align
+hyp_hvc:
+ @ Getting here is either because of a trap from a guest or from calling
+ @ HVC from the host kernel, which means "switch to Hyp mode".
+ push {r0, r1, r2}
+
+ @ Check syndrome register
+ mrc p15, 4, r0, c5, c2, 0 @ HSR
+ lsr r1, r0, #HSR_EC_SHIFT
+ cmp r1, #HSR_EC_HVC
+ bne guest_trap @ Not HVC instr.
+
+ @ Check if the HVC came from VMID 0; if so, allow a simple
+ @ switch to Hyp mode
+ mrrc p15, 6, r1, r2, c2
+ lsr r2, r2, #16
+ and r2, r2, #0xff
+ cmp r2, #0
+ bne guest_trap @ Guest called HVC
+
+ pop {r0, r1, r2}
+
+ @ Store lr_usr, spsr (svc cpsr) on the stack
+ sub sp, sp, #8
+ str lr, [sp]
+ mrs lr, spsr
+ str lr, [sp, #4]
+
+ @ Return to caller in Hyp mode
+ mrs lr, ELR_hyp
+ mov pc, lr
+
+ @ Not HVC from VMID 0 - this requires more careful investigation
+ @ TODO: Not implemented
+guest_trap:
+ ldr r1, [sp, #12] @ Load VCPU pointer
+ str r0, [r1, #VCPU_HSR]
+ add r1, r1, #VCPU_USR_REG(3)
+ stmia r1, {r3-r12}
+ sub r1, r1, #(VCPU_USR_REG(3) - VCPU_USR_REG(0))
+ pop {r3, r4, r5}
+ add sp, sp, #4 @ We loaded the VCPU pointer above
+ stmia r1, {r3, r4, r5}
+ sub r1, r1, #VCPU_USR_REG(0)
+
+ mov r0, #ARM_EXCEPTION_HVC
+ b __kvm_vcpu_return
+
+ .align
+hyp_irq:
+ push {r0}
+ ldr r0, [sp, #4] @ Load VCPU pointer
+ add r0, r0, #(VCPU_USR_REG(1))
+ stmia r0, {r1-r12}
+ pop {r0, r1} @ r1 == vcpu pointer
+ str r0, [r1, #VCPU_USR_REG(0)]
+
+ mov r0, #ARM_EXCEPTION_IRQ
+ b __kvm_vcpu_return
+
+ .align
+hyp_fiq:
+ sub pc, pc, #8
+
+ .ltorg
+
__kvm_hyp_vector_end:
.globl __kvm_hyp_vector_end