The RMM keeps track of the timer while the realm REC is running, but on
exit to the normal world KVM is responsible for handling the timers. A
later patch adds support for propagating the timer values from the exit
data structure and calling kvm_realm_timers_update().

Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/arm64/kvm/arch_timer.c  | 45 ++++++++++++++++++++++++++++++++----
 include/kvm/arm_arch_timer.h |  2 ++
 2 files changed, 43 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -162,6 +162,13 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 
 static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
 {
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	if (kvm_is_realm(vcpu->kvm)) {
+		WARN_ON(offset);
+		return;
+	}
+
 	if (!ctxt->offset.vm_offset) {
 		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
 		return;
@@ -459,6 +466,21 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	}
 }
 
+void kvm_realm_timers_update(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+	int i;
+
+	for (i = 0; i < NR_KVM_EL0_TIMERS; i++) {
+		struct arch_timer_context *timer = &arch_timer->timers[i];
+		bool status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
+		bool level = kvm_timer_irq_can_fire(timer) && status;
+
+		if (level != timer->irq.level)
+			kvm_timer_update_irq(vcpu, level, timer);
+	}
+}
+
 /* Only called for a fully emulated timer */
 static void timer_emulate(struct arch_timer_context *ctx)
 {
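
As noted in the commit message, a later patch will feed this function from
the REC exit data structure. A rough sketch of how that wiring could look,
reusing the existing timer_set_ctl()/timer_set_cval() helpers; the rec_exit
structure and its cntv_ctl/cntv_cval/cntp_ctl/cntp_cval field names are
illustrative assumptions, not part of this patch:

static void rec_exit_timers_update(struct kvm_vcpu *vcpu,
				   struct rec_exit *exit)
{
	/* Mirror the RMM's view of the EL0 timers into KVM's contexts */
	timer_set_ctl(vcpu_vtimer(vcpu), exit->cntv_ctl);
	timer_set_cval(vcpu_vtimer(vcpu), exit->cntv_cval);
	timer_set_ctl(vcpu_ptimer(vcpu), exit->cntp_ctl);
	timer_set_cval(vcpu_ptimer(vcpu), exit->cntp_cval);

	/* Recompute the IRQ line levels from the state copied above */
	kvm_realm_timers_update(vcpu);
}
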
@@ -830,6 +852,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
+	kvm_timer_unblocking(vcpu);
+
 	get_timer_map(vcpu, &map);
 
 	if (static_branch_likely(&has_gic_active_state)) {
@@ -843,8 +867,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 		kvm_timer_vcpu_load_nogic(vcpu);
 	}
 
-	kvm_timer_unblocking(vcpu);
-
 	timer_restore_state(map.direct_vtimer);
 	if (map.direct_ptimer)
 		timer_restore_state(map.direct_ptimer);
@@ -987,7 +1009,9 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
 	ctxt->vcpu = vcpu;
 
-	if (timerid == TIMER_VTIMER)
+	if (kvm_is_realm(vcpu->kvm))
+		ctxt->offset.vm_offset = NULL;
+	else if (timerid == TIMER_VTIMER)
 		ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
 	else
 		ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
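
Leaving vm_offset as NULL is safe on the read side: timer_get_offset() in
arch_timer.c already falls back to a zero offset when no vm_offset is wired
up, roughly as below (paraphrased and trimmed from mainline, so treat it as
a sketch rather than the exact current code):

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	/* No per-VM offset wired up (e.g. a realm): report zero */
	if (!ctxt || !ctxt->offset.vm_offset)
		return 0;

	return *ctxt->offset.vm_offset;
}

So from KVM's point of view a realm's counter offset reads as zero, while
the real CNTVOFF stays under the RMM's control.
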
@@ -1010,13 +1034,19 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+	u64 cntvoff;
 
 	for (int i = 0; i < NR_KVM_TIMERS; i++)
 		timer_context_init(vcpu, i);
 
+	if (kvm_is_realm(vcpu->kvm))
+		cntvoff = 0;
+	else
+		cntvoff = kvm_phys_timer_read();
+
 	/* Synchronize offsets across timers of a VM if not already provided */
 	if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
-		timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
+		timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
 		timer_set_offset(vcpu_ptimer(vcpu), 0);
 	}
 
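
Taken together with the timer_set_offset() change in the first hunk, the
realm initialisation path stays consistent; an illustrative trace
(commentary only, not new code):

/*
 * kvm_timer_vcpu_init(vcpu)                [realm vCPU]
 *   timer_context_init()    -> ctxt->offset.vm_offset = NULL
 *   cntvoff = 0                (the RMM owns the counter offset)
 *   timer_set_offset(vtimer, 0)
 *     -> kvm_is_realm(): WARN_ON(0) is silent, early return,
 *        vm_offset is never dereferenced
 *   timer_set_offset(ptimer, 0) -> same early return
 */
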
@@ -1524,6 +1554,13 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	/*
+	 * We don't use mapped IRQs for Realms because the RMI doesn't allow
+	 * us to set the LR.HW bit in the VGIC.
+	 */
+	if (vcpu_is_rec(vcpu))
+		return 0;
+
 	get_timer_map(vcpu, &map);
 
 	ret = kvm_vgic_map_phys_irq(vcpu,
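
With the early return above, a realm's timer PPIs are never backed by a
hardware-mapped list register; instead kvm_realm_timers_update() drives
kvm_timer_update_irq(), which injects a purely virtual level through the
vGIC. Paraphrased from arch_timer.c for context (trimmed, and the
kvm_vgic_inject_irq() signature has shifted across kernel versions, so
take this as a sketch):

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	timer_ctx->irq.level = new_level;

	/* No LR.HW here: the level is injected as a plain virtual IRQ */
	if (!userspace_irqchip(vcpu->kvm))
		WARN_ON(kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					    timer_irq(timer_ctx),
					    new_level, timer_ctx));
}
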
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -112,6 +112,8 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 
+void kvm_realm_timers_update(struct kvm_vcpu *vcpu);
+
 u64 kvm_phys_timer_read(void);
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);