@@ -177,6 +177,14 @@ int main(void)
DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
+#ifdef CONFIG_KVM_ARM_TIMER
+ DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+ DEFINE(VCPU_TIMER_CNTV_CVALH, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval32.high));
+ DEFINE(VCPU_TIMER_CNTV_CVALL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval32.low));
+ DEFINE(KVM_TIMER_CNTVOFF_H, offsetof(struct kvm, arch.timer.cntvoff32.high));
+ DEFINE(KVM_TIMER_CNTVOFF_L, offsetof(struct kvm, arch.timer.cntvoff32.low));
+ DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
+#endif
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
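
The DEFINE() entries above feed the usual asm-offsets machinery: the struct offsets are emitted as assembler-visible constants so the world-switch assembly further down can address the timer fields by name instead of hard-coded numbers. A minimal standalone sketch of that technique (toy struct and symbol names, not the kernel's kbuild.h; compile with "gcc -S" and look at the generated .s, since the markers are never meant to reach the assembler):

#include <stddef.h>

/* Same trick as asm-offsets.c: bake offsetof() values into the assembly
 * stream as "->NAME value" markers; a build-time script then turns those
 * markers into #define lines that assembly sources can include. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct toy_vcpu {
	unsigned int       cntv_ctl;
	unsigned long long cntv_cval;
};

int main(void)
{
	DEFINE(TOY_VCPU_CNTV_CTL,  offsetof(struct toy_vcpu, cntv_ctl));
	DEFINE(TOY_VCPU_CNTV_CVAL, offsetof(struct toy_vcpu, cntv_cval));
	return 0;
}
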
@@ -660,6 +660,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
update_vttbr(vcpu->kvm);
kvm_vgic_sync_to_cpu(vcpu);
+ kvm_timer_sync_to_cpu(vcpu);
local_irq_disable();
@@ -673,6 +674,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
local_irq_enable();
+ kvm_timer_sync_from_cpu(vcpu);
kvm_vgic_sync_from_cpu(vcpu);
continue;
}
@@ -712,6 +714,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* Back from guest
*************************************************************/
+ kvm_timer_sync_from_cpu(vcpu);
kvm_vgic_sync_from_cpu(vcpu);
ret = handle_exit(vcpu, run, ret);
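
The run loop now brackets each guest entry with the timer hooks: kvm_timer_sync_to_cpu() is called before interrupts are disabled, and kvm_timer_sync_from_cpu() runs on every path back from the guest, including the abort path. Conceptually, the CNTV_CTL/CNTV_CVAL state that the assembly below saves is what lets the host decide whether the guest's virtual timer has already fired or when it will. A minimal, self-contained sketch of that decision, based on the architected virtual timer semantics (helper and parameter names are illustrative only, not the patch's code):

#include <stdbool.h>
#include <stdint.h>

#define CNTV_CTL_ENABLE	(1U << 0)	/* timer enabled */
#define CNTV_CTL_IMASK	(1U << 1)	/* interrupt masked */

/*
 * Given the CNTV_CTL/CNTV_CVAL values saved on guest exit, report whether
 * the virtual timer condition is already met (inject the IRQ now) or how
 * many ticks remain (so a host-side software timer can be armed instead).
 */
static bool guest_timer_expired(uint32_t cntv_ctl, uint64_t cntv_cval,
				uint64_t cntpct, uint64_t cntvoff,
				uint64_t *ticks_left)
{
	uint64_t now = cntpct - cntvoff;	/* CNTVCT = CNTPCT - CNTVOFF */

	*ticks_left = 0;
	if (!(cntv_ctl & CNTV_CTL_ENABLE) || (cntv_ctl & CNTV_CTL_IMASK))
		return false;			/* disabled or masked: nothing pending */

	if (cntv_cval <= now)
		return true;			/* expired while outside the guest */

	*ticks_left = cntv_cval - now;
	return false;
}
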
@@ -422,6 +422,25 @@
#define CNTHCTL_PL1PCEN (1 << 1)
.macro save_timer_state vcpup
+#ifdef CONFIG_KVM_ARM_TIMER
+ ldr r4, [\vcpup, #VCPU_KVM]
+ ldr r2, [r4, #KVM_TIMER_ENABLED]
+ cmp r2, #0
+ beq 1f
+
+ mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+ and r2, #3
+ str r2, [\vcpup, #VCPU_TIMER_CNTV_CTL]
+ bic r2, #1 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+ isb
+
+ mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL
+ str r3, [\vcpup, #VCPU_TIMER_CNTV_CVALH]
+ str r2, [\vcpup, #VCPU_TIMER_CNTV_CVALL]
+
+1:
+#endif
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -435,6 +454,28 @@
orr r2, r2, #CNTHCTL_PL1PCTEN
bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
+
+#ifdef CONFIG_KVM_ARM_TIMER
+ ldr r4, [\vcpup, #VCPU_KVM]
+ ldr r2, [r4, #KVM_TIMER_ENABLED]
+ cmp r2, #0
+ beq 1f
+
+ ldr r3, [r4, #KVM_TIMER_CNTVOFF_H]
+ ldr r2, [r4, #KVM_TIMER_CNTVOFF_L]
+ mcrr p15, 4, r2, r3, c14 @ CNTVOFF
+ isb
+
+ ldr r3, [\vcpup, #VCPU_TIMER_CNTV_CVALH]
+ ldr r2, [\vcpup, #VCPU_TIMER_CNTV_CVALL]
+ mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL
+
+ ldr r2, [\vcpup, #VCPU_TIMER_CNTV_CTL]
+ and r2, #3
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+ isb
+1:
+#endif
.endm
/* Configures the HSTR (Hyp System Trap Register) on entry/return
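
CNTV_CVAL and CNTVOFF are 64-bit registers, which is why the macros above move them through register pairs with mrrc/mcrr and why the state is kept as separate CVALH/CVALL (and CNTVOFF_H/_L) words. CNTVOFF (the "p15, 4" encoding) is only writable from Hyp mode, which is why the macro programs it inside the world switch rather than from host C code. For reference, the same per-register accesses expressed as ARMv7 GCC inline assembly might look like the sketch below (illustrative helpers, not from the patch; %Q/%R select the low/high word of a 64-bit operand):

#include <stdint.h>

/* CNTV_CTL: 32-bit control register (ENABLE, IMASK, ISTATUS) */
static inline uint32_t read_cntv_ctl(void)
{
	uint32_t val;
	asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
	return val;
}

/* CNTV_CVAL: 64-bit compare value, accessed as a register pair */
static inline uint64_t read_cntv_cval(void)
{
	uint64_t val;
	asm volatile("mrrc p15, 3, %Q0, %R0, c14" : "=r" (val));
	return val;
}

static inline void write_cntv_cval(uint64_t val)
{
	asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
}
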