@@ -75,7 +75,8 @@ int kvm_timer_hyp_init(bool);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
@@ -710,6 +710,45 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
set_cntvoff(0);
}

+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64
+ if (!is_hyp_ctxt(vcpu))
+ return;
+
+ /*
+ * Guest hypervisors using ARMv8.4 enhanced nested virt support have
+ * their EL1 timer register accesses redirected to the VNCR page.
+ */
+ if (!vcpu_el2_e2h_is_set(vcpu)) {
+ /*
+ * For a non-VHE guest hypervisor, we update the hardware
+ * timer registers with the latest value written by the guest
+ * to the VNCR page and let the hardware take care of the
+ * rest.
+ */
+ write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL);
+ write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
+ write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CTL_EL0), SYS_CNTP_CTL);
+ write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0), SYS_CNTP_CVAL);
+ } else {
+ /*
+ * For a VHE guest hypervisor, the emulated state (which
+ * is stored in the VNCR page) could have been updated behind
+ * our back, and we must reset the emulation of the timers.
+ */
+
+ struct timer_map map;
+ get_timer_map(vcpu, &map);
+
+ soft_timer_cancel(&map.emul_vtimer->hrtimer);
+ soft_timer_cancel(&map.emul_ptimer->hrtimer);
+ timer_emulate(map.emul_vtimer);
+ timer_emulate(map.emul_ptimer);
+ }
+#endif
+}
+
/*
* With a userspace irqchip we have to check if the guest de-asserted the
* timer and if so, unmask the timer irq signal on the host interrupt
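
The VHE branch above is the subtle case: with ARMv8.4-NV, the guest
hypervisor's EL1 timer accesses are redirected to the VNCR page, so writes
land in memory without trapping, and any hrtimer armed from the old CVAL
may now point at a stale deadline. Below is a minimal standalone model of
that cancel-and-re-evaluate step; everything in it (emul_timer,
timer_should_fire, the CTL bit macros) is an illustrative stand-in, not
the kernel's API.

/*
 * Standalone model of the "cancel and re-evaluate" pattern used for the
 * VHE guest-hypervisor path above. Compiles with any C99 compiler; all
 * names are stand-ins for the kernel's timer_map/hrtimer machinery.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TIMER_CTL_ENABLE  (1u << 0)
#define TIMER_CTL_IMASK   (1u << 1)

struct emul_timer {
	uint64_t cval;	/* compare value, as written via the VNCR page */
	uint32_t ctl;	/* ENABLE/IMASK control bits */
	bool     armed;	/* stands in for a pending hrtimer */
};

static uint64_t now_cycles = 1000;	/* stands in for the virtual counter */

static bool timer_should_fire(const struct emul_timer *t)
{
	return (t->ctl & TIMER_CTL_ENABLE) && !(t->ctl & TIMER_CTL_IMASK) &&
	       now_cycles >= t->cval;
}

/*
 * Models timer_emulate(): cancel any pending software timer (its deadline
 * may be stale), then either signal the interrupt immediately if the
 * condition already holds, or re-arm for the new deadline.
 */
static void emulate(struct emul_timer *t)
{
	t->armed = false;	/* soft_timer_cancel() equivalent */

	if (timer_should_fire(t)) {
		printf("level high: inject the timer interrupt now\n");
		return;
	}

	if (t->ctl & TIMER_CTL_ENABLE) {
		t->armed = true;
		printf("re-armed for cval=%llu\n", (unsigned long long)t->cval);
	}
}

int main(void)
{
	struct emul_timer vtimer = { .cval = 5000, .ctl = TIMER_CTL_ENABLE };

	/*
	 * The guest hypervisor rewrote CVAL through the VNCR page, so a
	 * deadline computed from the old value is stale: re-evaluate.
	 */
	vtimer.cval = 2000;
	emulate(&vtimer);
	return 0;
}
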
@@ -728,7 +767,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
}
}

-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);

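
For reference, the renamed function's job (per the comment above) is
narrow: after a guest exit with a userspace irqchip, detect a falling edge
on the emulated timer line and unmask the host interrupt that was masked
when the line went up, so the timer can fire again. A toy model of that
edge detection follows; the struct and field names are hypothetical, not
taken from the kernel.

/*
 * Toy model of the unmask logic: while the emulated timer line is high,
 * the host keeps its timer interrupt masked so it doesn't storm; once
 * the guest reprograms the timer and the line drops, the host interrupt
 * is unmasked again. Names are hypothetical, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct vtimer_line {
	bool irq_level;		/* level last reported to userspace */
	bool host_irq_masked;	/* mask state of the host timer interrupt */
};

static void sync_user(struct vtimer_line *line, bool level_now)
{
	if (line->irq_level && !level_now) {
		/* Falling edge: the guest de-asserted the timer */
		line->irq_level = false;
		line->host_irq_masked = false;
		printf("timer de-asserted, host irq unmasked\n");
	}
}

int main(void)
{
	struct vtimer_line line = {
		.irq_level = true,
		.host_irq_masked = true,
	};

	/* Guest masked/disabled its timer in CNTV_CTL, so the line drops */
	sync_user(&line, false);
	return 0;
}
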
@@ -730,7 +730,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
isb(); /* Ensure work in x_flush_hwstate is committed */
kvm_pmu_sync_hwstate(vcpu);
if (static_branch_unlikely(&userspace_irqchip_in_use))
- kvm_timer_sync_hwstate(vcpu);
+ kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
preempt_enable();
@@ -781,7 +781,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* timer virtual interrupt state.
*/
if (static_branch_unlikely(&userspace_irqchip_in_use))
- kvm_timer_sync_hwstate(vcpu);
+ kvm_timer_sync_user(vcpu);
+
+ if (enhanced_nested_virt_in_use(vcpu))
+ kvm_timer_sync_nested(vcpu);

kvm_arch_vcpu_ctxsync_fp(vcpu);
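
enhanced_nested_virt_in_use() is not defined in this patch. A plausible
shape for it, assuming it follows the gating pattern the nested-virt
series uses elsewhere (a host cpucap for ARMv8.4-NV combined with a
per-vcpu EL2 check; both identifiers below are assumptions, not taken
from this diff):

/*
 * Sketch only: assumes an ARM64_HAS_ENHANCED_NESTED_VIRT cpucap and the
 * series' nested_virt_in_use() vcpu predicate; neither is defined in
 * this patch, so treat both as placeholders.
 */
static inline bool enhanced_nested_virt_in_use(struct kvm_vcpu *vcpu)
{
	return cpus_have_const_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
	       nested_virt_in_use(vcpu);
}

With a gate like that, kvm_timer_sync_nested() is reached on every return
to the run loop, but only for vcpus created with EL2 support and only on
hardware where the VNCR redirection can actually occur.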