@@ -95,6 +95,13 @@ struct kvm_arch {
* supported.
*/
bool return_nisv_io_abort_to_user;
+
+ /* Guest PV Live Physical Time state */
+ struct {
+ u32 fpv; /* PV frequency */
+ gpa_t base; /* Base IPA of shared structure */
+ bool updated; /* Indicate whether it is updated by KVM */
+ } lpt;
};
#define KVM_NR_MEM_OBJS 40
@@ -506,6 +513,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+gpa_t kvm_init_lpt_time(struct kvm *kvm);
+int kvm_update_lpt_time(struct kvm *kvm);
+
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
@@ -135,6 +135,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
+ /* Should be setup by userspace before guest run */
+ kvm->arch.lpt.base = GPA_INVALID;
+
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
@@ -528,6 +531,10 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
+ ret = kvm_update_lpt_time(kvm);
+ if (ret)
+ return ret;
+
if (likely(irqchip_in_kernel(kvm))) {
/*
* Map the VGIC hardware resources before running a vcpu the
@@ -62,6 +62,11 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
if (gpa != GPA_INVALID)
val = gpa;
break;
+ case ARM_SMCCC_HV_PV_TIME_LPT:
+ gpa = kvm_init_lpt_time(vcpu->kvm);
+ if (gpa != GPA_INVALID)
+ val = gpa;
+ break;
default:
return kvm_psci_call(vcpu);
}
@@ -43,7 +43,12 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
val = SMCCC_RET_SUCCESS;
break;
+ /*
+ * LPT is only advertised once the shared structure has been
+ * set up by kvm_update_lpt_time(); the FEATURES and ST calls
+ * above must remain unconditionally supported, independent of
+ * whether userspace ever enabled LPT.
+ */
+ case ARM_SMCCC_HV_PV_TIME_LPT:
+ if (vcpu->kvm->arch.lpt.updated)
+ val = SMCCC_RET_SUCCESS;
+ break;
}
@@ -134,3 +136,124 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
}
return -ENXIO;
}
+
+static int pvclock_lpt_update_vtimer(struct kvm *kvm,
+ struct pvclock_vm_lpt_time *pvclock)
+{ /* Rescale each vcpu's virtual timer after a host counter-frequency change. */
+ u32 current_freq = arch_timer_get_rate(); /* host's current native frequency */
+ u64 current_time = kvm_phys_timer_read(); /* current physical counter value */
+ u32 previous_freq;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ /* The first run? */
+ if (le64_to_cpu(pvclock->sequence_number) == 0)
+ return 0; /* structure never published yet: nothing to rescale */
+
+ /* PV frequency must not change! */
+ if (le32_to_cpu(pvclock->pv_freq) != kvm->arch.lpt.fpv)
+ return -EFAULT; /* guest-visible PV freq disagrees with userspace's fpv */
+
+ previous_freq = le32_to_cpu(pvclock->native_freq);
+ if (previous_freq == current_freq)
+ return 0; /* same physical frequency: existing offsets remain valid */
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ u64 cntvct, new_cntvct;
+ u32 cnt_tval, new_cnt_tval;
+
+ /* Update cntvoff based on new cntvct */
+ cntvct = current_time - vtimer->cntvoff; /* guest-visible virtual count */
+ new_cntvct = mul_u64_u32_div(cntvct,
+ current_freq,
+ previous_freq); /* scale count into the new frequency domain */
+ vtimer->cntvoff = current_time - new_cntvct;
+
+ /* Update cnt_cval based on new cnt_tval */
+ cnt_tval = vtimer->cnt_cval - cntvct; /* NOTE(review): u64 delta truncated to u32; assumes the pending interval fits TVAL range and cval >= cntvct — confirm */
+ new_cnt_tval = mul_u64_u32_div(cnt_tval,
+ current_freq,
+ previous_freq);
+ vtimer->cnt_cval = new_cntvct + new_cnt_tval; /* same remaining time, new domain */
+ }
+
+ return 0;
+}
+
+static void pvclock_lpt_update_structure(struct kvm *kvm,
+ struct pvclock_vm_lpt_time *pvclock)
+{ /* Recompute the guest-shared LPT structure: fixed-point scale factors between native and PV frequencies. */
+ u64 sequence_number, scale_mult, rscale_mult;
+ u32 native_freq, pv_freq;
+ u32 scale_intbits, fracbits;
+ u32 rscale_intbits, rfracbits;
+
+ sequence_number = le64_to_cpu(pvclock->sequence_number) + 2; /* advance by 2 to preserve parity of the seqlock-style counter */
+
+ native_freq = arch_timer_get_rate();
+ pv_freq = kvm->arch.lpt.fpv; /* frequency userspace promised the guest */
+
+ /* At least one bit for int part */
+ scale_intbits = rscale_intbits = 1;
+ if (pv_freq >= native_freq)
+ scale_intbits = ilog2(pv_freq / native_freq) + 1; /* pv/native ratio > 1 needs wider integer part */
+ else
+ rscale_intbits = ilog2(native_freq / pv_freq) + 1; /* reverse ratio > 1 for rscale */
+
+ fracbits = 64 - scale_intbits; /* scale_mult == (pv_freq/native_freq) << fracbits */
+ scale_mult = mul_u64_u32_div(BIT_ULL(fracbits), pv_freq, native_freq);
+ rfracbits = 64 - rscale_intbits; /* rscale_mult == (native_freq/pv_freq) << rfracbits */
+ rscale_mult = mul_u64_u32_div(BIT_ULL(rfracbits), native_freq, pv_freq);
+
+ pvclock->sequence_number = cpu_to_le64(sequence_number); /* all fields little-endian per the shared ABI */
+ pvclock->native_freq = cpu_to_le32(native_freq);
+ pvclock->pv_freq = cpu_to_le32(pv_freq);
+ pvclock->scale_mult = cpu_to_le64(scale_mult);
+ pvclock->rscale_mult = cpu_to_le64(rscale_mult);
+ pvclock->fracbits = cpu_to_le32(fracbits);
+ pvclock->rfracbits = cpu_to_le32(rfracbits);
+}
+
+int kvm_update_lpt_time(struct kvm *kvm)
+{ /* Refresh the guest's LPT structure once per VM; called on first vcpu run. Returns 0 or -errno. */
+ u32 pv_freq = kvm->arch.lpt.fpv;
+ u64 lpt_ipa = kvm->arch.lpt.base;
+ struct pvclock_vm_lpt_time pvclock;
+ int ret = 0;
+
+ /* Userspace does not enable LPT? */
+ if (pv_freq == 0 && lpt_ipa == GPA_INVALID)
+ return 0; /* feature unused: not an error */
+
+ /* Userspace fault programming? */
+ if (pv_freq == 0 || lpt_ipa == GPA_INVALID)
+ return -EINVAL; /* half-configured: freq and base must both be set */
+
+ mutex_lock(&kvm->lock); /* serialize against concurrent first-run vcpus */
+ if (kvm->arch.lpt.updated)
+ goto unlock; /* another vcpu already did the update; ret stays 0 */
+
+ ret = kvm_read_guest_lock(kvm, lpt_ipa, &pvclock, sizeof(pvclock));
+ if (ret < 0)
+ goto unlock;
+
+ ret = pvclock_lpt_update_vtimer(kvm, &pvclock); /* rescale timers if native freq changed */
+ if (ret < 0)
+ goto unlock;
+
+ pvclock_lpt_update_structure(kvm, &pvclock); /* bump sequence, rewrite scale factors */
+
+ ret = kvm_write_guest_lock(kvm, lpt_ipa, &pvclock, sizeof(pvclock));
+ if (!ret)
+ kvm->arch.lpt.updated = true; /* publish only after a successful write-back */
+
+unlock:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+gpa_t kvm_init_lpt_time(struct kvm *kvm)
+{ /* Hypercall backend: report the LPT structure's IPA to the guest. */
+ return kvm->arch.lpt.base; /* GPA_INVALID until userspace configures LPT */
+}