@@ -2463,6 +2463,16 @@ arm64 system registers have the following id bit patterns::
derived from the register encoding for CNTV_CVAL_EL0. As this is
API, it must remain this way.
+.. warning::
+
+   The value of KVM_REG_ARM_TIMER_OFFSET is defined as the offset
+   subtracted from the guest's view of the physical counter-timer
+   (CNTPCT_EL0) to produce its virtual counter-timer (CNTVCT_EL0).
+
+ Userspace should use either KVM_REG_ARM_TIMER_OFFSET or
+ KVM_REG_ARM_TIMER_CNT to pause and resume a guest's virtual
+ counter-timer. Mixed use of these registers could result in an
+ unpredictable guest counter value.
+
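+For example, a VMM that uses only KVM_REG_ARM_TIMER_CNT could pause and
+resume the virtual counter-timer as sketched below (a minimal,
+illustrative sequence: error handling and includes are omitted, and
+vcpu_fd is assumed to be an open vCPU file descriptor)::
+
+  __u64 cnt;
+  struct kvm_one_reg reg = {
+          .id   = KVM_REG_ARM_TIMER_CNT,
+          .addr = (__u64)&cnt,
+  };
+
+  /* Snapshot the guest's virtual counter when pausing the VM... */
+  ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
+
+  /* ...and write it back on resume; KVM derives the new offset. */
+  ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
+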
arm64 firmware pseudo-registers have the following bit pattern::
0x6030 0000 0014 <regno:16>
@@ -7265,3 +7275,16 @@ The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset
of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
the hypercalls whose corresponding bit is in the argument, and return
ENOSYS for the others.
+
+8.35 KVM_CAP_ARM_VTIMER_OFFSET
+------------------------------
+
+:Capability: KVM_CAP_ARM_VTIMER_OFFSET
+:Architectures: arm64
+:Type: vm
+
+This capability, if enabled, causes KVM to expose the
+KVM_REG_ARM_TIMER_OFFSET register through the KVM_{GET,SET}_ONE_REG
+and KVM_GET_REG_LIST ioctls. VMMs implementing this register must
+observe the warning in section 4.68 regarding the mixed use of timer
+registers.
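+
+A minimal sketch of enabling the capability (error handling omitted;
+vm_fd is assumed to be an open VM file descriptor)::
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_ARM_VTIMER_OFFSET,
+  };
+
+  if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VTIMER_OFFSET) == 1)
+          ioctl(vm_fd, KVM_ENABLE_CAP, &cap);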
@@ -136,6 +136,9 @@ struct kvm_arch {
/* Memory Tagging Extension enabled for the guest */
bool mte_enabled;
+
+ /* KVM_REG_ARM_TIMER_OFFSET enabled for the guest */
+ bool vtimer_offset_enabled;
};
struct kvm_vcpu_fault_info {
@@ -255,6 +255,7 @@ struct kvm_arm_copy_mte_tags {
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
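+/* Matches the sys-reg encoding of CNTVOFF_EL2 (op0=3, op1=4, CRn=14, CRm=0, op2=3) */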
+#define KVM_REG_ARM_TIMER_OFFSET ARM64_SYS_REG(3, 4, 14, 0, 3)
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
@@ -92,6 +92,18 @@ static u64 timer_get_offset(struct arch_timer_context *ctxt)
}
}
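+/*
+ * Return the offset subtracted from the physical count to derive the
+ * guest's view of its virtual counter (CNTVOFF_EL2 for the vtimer).
+ * Timer contexts without a guest-visible offset read as 0.
+ */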
+static u64 timer_get_guest_offset(struct arch_timer_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch (arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER:
+ return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+ default:
+ return 0;
+ }
+}
+
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
@@ -852,6 +864,10 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
+ case KVM_REG_ARM_TIMER_OFFSET:
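+		/* CNTVOFF is VM-wide; the write is propagated to every vCPU */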
+ timer = vcpu_vtimer(vcpu);
+ update_vtimer_cntvoff(vcpu, value);
+ break;
case KVM_REG_ARM_PTIMER_CTL:
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
@@ -896,6 +912,9 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
case KVM_REG_ARM_TIMER_CVAL:
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CVAL);
+ case KVM_REG_ARM_TIMER_OFFSET:
+ return kvm_arm_timer_read(vcpu,
+ vcpu_vtimer(vcpu), TIMER_REG_OFFSET);
case KVM_REG_ARM_PTIMER_CTL:
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CTL);
@@ -933,6 +952,10 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
val = kvm_phys_timer_read() - timer_get_offset(timer);
break;
+ case TIMER_REG_OFFSET:
+ val = timer_get_guest_offset(timer);
+ break;
+
default:
BUG();
}
@@ -101,6 +101,10 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
}
mutex_unlock(&kvm->lock);
break;
+ case KVM_CAP_ARM_VTIMER_OFFSET:
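+		/* Makes the offset reg visible via KVM_GET_REG_LIST from now on */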
+ r = 0;
+ kvm->arch.vtimer_offset_enabled = true;
+ break;
default:
r = -EINVAL;
break;
@@ -215,6 +219,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VCPU_ATTRIBUTES:
case KVM_CAP_PTP_KVM:
+ case KVM_CAP_ARM_VTIMER_OFFSET:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
@@ -590,16 +590,23 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
static inline unsigned long num_timer_regs(struct kvm_vcpu *vcpu)
{
- return 3;
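+	/* CTL, CNT and CVAL are always exposed; the offset reg is opt-in */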
+ unsigned long nr_regs = 3;
+
+ if (vcpu->kvm->arch.vtimer_offset_enabled)
+ nr_regs++;
+
+ return nr_regs;
}
-static bool is_timer_reg(u64 index)
+static bool is_timer_reg(struct kvm_vcpu *vcpu, u64 index)
{
switch (index) {
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
return true;
+ case KVM_REG_ARM_TIMER_OFFSET:
+ return vcpu->kvm->arch.vtimer_offset_enabled;
}
return false;
}
@@ -615,6 +622,12 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
return -EFAULT;
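+	/* Must stay in sync with the count returned by num_timer_regs() */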
+ if (vcpu->kvm->arch.vtimer_offset_enabled) {
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_OFFSET, uindices))
+ return -EFAULT;
+ }
+
return 0;
}
@@ -763,7 +776,7 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
}
- if (is_timer_reg(reg->id))
+ if (is_timer_reg(vcpu, reg->id))
return get_timer_reg(vcpu, reg);
return kvm_arm_sys_reg_get_reg(vcpu, reg);
@@ -781,7 +794,7 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
}
- if (is_timer_reg(reg->id))
+ if (is_timer_reg(vcpu, reg->id))
return set_timer_reg(vcpu, reg);
return kvm_arm_sys_reg_set_reg(vcpu, reg);
@@ -21,6 +21,7 @@ enum kvm_arch_timer_regs {
TIMER_REG_CVAL,
TIMER_REG_TVAL,
TIMER_REG_CTL,
+ TIMER_REG_OFFSET,
};
struct arch_timer_context {
@@ -1112,6 +1112,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_ARM_VTIMER_OFFSET 206
#ifdef KVM_CAP_IRQ_ROUTING