diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
@@ -281,6 +281,12 @@ enum vcpu_sysreg {
VNCR(ICH_HCR_EL2),
VNCR(ICH_VMCR_EL2),
+ VNCR(CNTVOFF_EL2),
+ VNCR(CNTV_CVAL_EL0),
+ VNCR(CNTV_CTL_EL0),
+ VNCR(CNTP_CVAL_EL0),
+ VNCR(CNTP_CTL_EL0),
+
NR_SYS_REGS /* Nothing after this line! */
};
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
@@ -40,4 +40,5 @@ kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
kvm-$(CONFIG_KVM_ARM_HOST) += nested.o
kvm-$(CONFIG_KVM_ARM_HOST) += emulate-nested.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer_nested.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3-nested.o
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
@@ -805,6 +805,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
struct arch_timer_context *hvtimer = vcpu_hvtimer(vcpu);
struct arch_timer_context *hptimer = vcpu_hptimer(vcpu);
+ vtimer->vcpu = vcpu;
+ ptimer->vcpu = vcpu;
+ hvtimer->vcpu = vcpu;
+ hptimer->vcpu = vcpu;
+
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);
@@ -838,11 +843,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
hvtimer->host_timer_irq_flags = host_vtimer_irq_flags;
hptimer->host_timer_irq_flags = host_ptimer_irq_flags;
-
- vtimer->vcpu = vcpu;
- ptimer->vcpu = vcpu;
- hvtimer->vcpu = vcpu;
- hptimer->vcpu = vcpu;
}
static void kvm_timer_init_interrupt(void *info)
diff --git a/virt/kvm/arm/arch_timer_nested.c b/virt/kvm/arm/arch_timer_nested.c
new file mode 100644
--- /dev/null
+++ b/virt/kvm/arm/arch_timer_nested.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 ARM Ltd.
+ * Author: Marc Zyngier <maz@kernel.org>
+ *
+ * Override timer accessors to be nested-capable on ARMv8.4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+
+u32 timer_get_ctl(struct arch_timer_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch(arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+ case TIMER_PTIMER:
+ return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+ default:
+ return ctxt->cnt_ctl;
+ }
+}
+
+u64 timer_get_cval(struct arch_timer_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch(arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+ case TIMER_PTIMER:
+ return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ default:
+ return ctxt->cnt_cval;
+ }
+}
+
+u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch(arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+ default:
+ return 0;
+ }
+}
+
+void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch(arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+ break;
+ case TIMER_PTIMER:
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+ break;
+ default:
+ ctxt->cnt_ctl = ctl;
+ }
+}
+
+void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch(arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+ break;
+ case TIMER_PTIMER:
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+ break;
+ default:
+ ctxt->cnt_cval = cval;
+ }
+}
+
+void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch(arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+ break;
+ default:
+ WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+ }
+}
As promised, we now add an arm64-specific set of timer accessors, which
in some cases access the VNCR page instead of the struct timer fields.

This requires moving the initialisation of the timer struct so that
some of the helpers (such as arch_timer_ctx_index) can work correctly
at an early stage.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h |  6 ++
 arch/arm64/kvm/Makefile           |  1 +
 virt/kvm/arm/arch_timer.c         | 10 ++--
 virt/kvm/arm/arch_timer_nested.c  | 95 +++++++++++++++++++++++++++++++
 4 files changed, 107 insertions(+), 5 deletions(-)
 create mode 100644 virt/kvm/arm/arch_timer_nested.c
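
Note for reviewers, not part of the commit: the stand-alone sketch below
models why the vcpu back-pointer assignment had to move to the top of
kvm_timer_vcpu_init(). The struct layout, the sys_regs[] stand-in for the
VNCR-backed sysreg file, and the body of arch_timer_ctx_index() are
simplified assumptions for illustration only; the point is that the new
accessors recover the timer index through ctxt->vcpu, so the first
timer_set_offset()/timer_set_cval() call would dereference a NULL vcpu if
the assignments still happened at the end of the function.

/*
 * Hedged sketch (userspace C, compiles with any C99 compiler): a toy
 * model of the accessor indirection, not the kernel implementation.
 */
#include <stdio.h>
#include <stdint.h>

enum kvm_arch_timers { TIMER_PTIMER, TIMER_VTIMER, TIMER_HVTIMER, TIMER_HPTIMER, NR_KVM_TIMERS };

struct vcpu;

struct arch_timer_context {
	struct vcpu *vcpu;	/* back-pointer, must be set early in vcpu init */
	uint64_t cnt_cval;	/* fallback storage for non-VNCR timers */
};

struct vcpu {
	uint64_t sys_regs[4];				/* stand-in for the VNCR-backed sysreg file */
	struct arch_timer_context timers[NR_KVM_TIMERS];
};

/*
 * Assumed behaviour: the index is recovered from the context's position
 * inside the per-vCPU array, which is why ctxt->vcpu must be valid.
 */
static long arch_timer_ctx_index(struct arch_timer_context *ctxt)
{
	return ctxt - ctxt->vcpu->timers;
}

static void timer_set_cval(struct arch_timer_context *ctxt, uint64_t cval)
{
	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		ctxt->vcpu->sys_regs[0] = cval;	/* models CNTV_CVAL_EL0 in the sysreg file */
		break;
	case TIMER_PTIMER:
		ctxt->vcpu->sys_regs[1] = cval;	/* models CNTP_CVAL_EL0 in the sysreg file */
		break;
	default:
		ctxt->cnt_cval = cval;		/* hv/hp timers keep using the struct field */
	}
}

int main(void)
{
	static struct vcpu v;

	/*
	 * Mirror the reordering in kvm_timer_vcpu_init(): assign the
	 * back-pointers first, otherwise arch_timer_ctx_index() would
	 * dereference a NULL vcpu on the very first accessor call.
	 */
	for (int i = 0; i < NR_KVM_TIMERS; i++)
		v.timers[i].vcpu = &v;

	timer_set_cval(&v.timers[TIMER_VTIMER], 42);
	printf("CNTV_CVAL_EL0 = %llu\n", (unsigned long long)v.sys_regs[0]);
	return 0;
}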