@@ -759,21 +759,6 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
timer_irq(map->direct_ptimer),
&arch_timer_irq_ops);
WARN_ON_ONCE(ret);
-
- /*
- * The virtual offset behaviour is "interesting", as it
- * always applies when HCR_EL2.E2H==0, but only when
- * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
- * track E2H when putting the HV timer in "direct" mode.
- */
- if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
- struct arch_timer_offset *offs = &map->direct_vtimer->offset;
-
- if (vcpu_el2_e2h_is_set(vcpu))
- offs->vcpu_offset = NULL;
- else
- offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
- }
}
}
@@ -1045,18 +1030,6 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
for (int i = 0; i < nr_timers(vcpu); i++)
timer_set_ctl(vcpu_get_timer(vcpu, i), 0);
- /*
- * A vcpu running at EL2 is in charge of the offset applied to
- * the virtual timer, so use the physical VM offset, and point
- * the vcpu offset to CNTVOFF_EL2.
- */
- if (vcpu_has_nv(vcpu)) {
- struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
-
- offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
- offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
- }
-
if (timer->enabled) {
for (int i = 0; i < nr_timers(vcpu); i++)
kvm_timer_update_irq(vcpu, false,
@@ -1102,6 +1075,27 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
}
}
+void kvm_timer_vcpu_nv_init(struct kvm_vcpu *vcpu)
+{
+ /*
+ * A vcpu running at EL2 is in charge of the offset applied to
+ * the virtual timer, so use the physical VM offset, and point
+ * the vcpu offset to CNTVOFF_EL2.
+ *
+ * The virtual offset behaviour is "interesting", as it always
+ * applies when HCR_EL2.E2H==0, but only when accessed from EL1 when
+ * HCR_EL2.E2H==1. Apply it to the HV timer when E2H==0.
+ */
+ struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
+ u64 *voff = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
+
+ offs->vcpu_offset = voff;
+ offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ vcpu_hvtimer(vcpu)->offset.vcpu_offset = voff;
+}
+
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@@ -815,6 +815,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (ret)
return ret;
+ if (vcpu_has_nv(vcpu))
+ kvm_timer_vcpu_nv_init(vcpu);
+
/*
* This needs to happen after any restriction has been applied
* to the feature set.
@@ -98,6 +98,7 @@ int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_nv_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
The way we configure the virtual timers with NV is rather odd: - the EL1 virtual timer gets setup in kvm_timer_vcpu_reset(). Why not? - the EL2 virtual timer gets setup at vcpu_load time, which is really bizarre, because this really should be a one-off. The reason for the second point is that this setup is conditioned on HCR_EL2.E2H, as it decides whether CNTVOFF_EL2 applies to the EL2 virtual counter or not. And of course, this is not known at the point where we reset the timer. Huh. Solve this by introducing a NV-specific init for the timers, matching what we do for the other subsystems, that gets called once we know for sure that the configuration is final (on first vcpu run, effectively). This makes kvm_timer_vcpu_load_nested_switch() slightly simpler. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/kvm/arch_timer.c | 48 ++++++++++++++++-------------------- arch/arm64/kvm/arm.c | 3 +++ include/kvm/arm_arch_timer.h | 1 + 3 files changed, 25 insertions(+), 27 deletions(-)