@@ -501,6 +501,11 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
+static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
+{
+ return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
+}
+
static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *ctxt;
@@ -296,6 +296,13 @@ static bool kvm_hyp_handle_timer(struct kvm_vcpu *vcpu, u64 *exit_code)
val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
}
break;
+ case SYS_CNTPCT_EL0:
+ case SYS_CNTPCTSS_EL0:
+ /* If !ELIsInHost(EL0), the guest's CNTPOFF_EL2 applies */
+ val = compute_counter_value(!(vcpu_el2_e2h_is_set(vcpu) &&
+ vcpu_el2_tge_is_set(vcpu)) ?
+ vcpu_ptimer(vcpu) : vcpu_hptimer(vcpu));
+ break;
case SYS_CNTV_CTL_EL02:
val = __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
break;
@@ -314,6 +321,12 @@ static bool kvm_hyp_handle_timer(struct kvm_vcpu *vcpu, u64 *exit_code)
else
val = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
break;
+ case SYS_CNTVCT_EL0:
+ case SYS_CNTVCTSS_EL0:
+ /* If !ELIsInHost(EL2), the guest's CNTVOFF_EL2 applies */
+ val = compute_counter_value(!vcpu_el2_e2h_is_set(vcpu) ?
+ vcpu_vtimer(vcpu) : vcpu_hvtimer(vcpu));
+ break;
default:
return false;
}
Similarly to handling the physical timer accesses early when FEAT_ECV
causes a trap, we try to handle the physical counter without returning
to the general sysreg handling.

More surprisingly, we introduce something similar for the virtual
counter. Although this isn't necessary yet, it will prove useful on
systems that have a broken CNTVOFF_EL2 implementation. Yes, they exist.

Special care is taken to offset reads of the counter with the host's
CNTPOFF_EL2, as we perform this with TGE clear.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  5 +++++
 arch/arm64/kvm/hyp/vhe/switch.c         | 13 +++++++++++++
 2 files changed, 18 insertions(+)