@@ -241,6 +241,9 @@
#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
+/* Bit index of the TM (time) flag in the [h]counteren CSRs */
+#define CE_TM 1
+
#ifndef __ASSEMBLY__
#define csr_swap(csr, val) \
@@ -41,6 +41,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
int kvm_riscv_guest_timer_init(struct kvm *kvm);
+u64 kvm_riscv_read_guest_time(struct kvm_vcpu *vcpu);
static inline bool kvm_riscv_need_scale(struct kvm_guest_timer *gt)
{
@@ -18,6 +18,10 @@
#define INSN_MASK_WFI 0xffffff00
#define INSN_MATCH_WFI 0x10500000
+#define INSN_MASK_RDTIME 0xfff03000
+#define INSN_MATCH_RDTIME 0xc0102000
+#define INSN_MASK_RDTIMEH 0xfff03000
+#define INSN_MATCH_RDTIMEH 0xc8102000
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
@@ -138,6 +142,34 @@ static int truly_illegal_insn(struct kvm_vcpu *vcpu,
return 1;
}
+/*
+ * Emulate a trapped rdtime/rdtimeh instruction by supplying the guest's
+ * view of the time counter (host time scaled and offset by time_delta).
+ *
+ * Returns 1 when the instruction was recognised and emulated -- the
+ * destination register is written and sepc is advanced past the
+ * instruction -- or 0 so the caller can continue decoding.
+ */
+static int system_opcode_insn_rdtime(struct kvm_vcpu *vcpu,
+				     struct kvm_run *run,
+				     ulong insn)
+{
+#ifdef CONFIG_64BIT
+	/* RV64: a single rdtime yields the full 64-bit guest time. */
+	if ((insn & INSN_MASK_RDTIME) == INSN_MATCH_RDTIME) {
+		u64 guest_time = kvm_riscv_read_guest_time(vcpu);
+		SET_RD(insn, &vcpu->arch.guest_context, guest_time);
+		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+		return 1;
+	}
+#else
+	/* RV32: rdtime yields the low 32 bits of the guest time... */
+	if ((insn & INSN_MASK_RDTIME) == INSN_MATCH_RDTIME) {
+		u64 guest_time = kvm_riscv_read_guest_time(vcpu);
+		SET_RD(insn, &vcpu->arch.guest_context, (u32)guest_time);
+		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+		return 1;
+	}
+	/* ...and rdtimeh yields the high 32 bits. */
+	if ((insn & INSN_MASK_RDTIMEH) == INSN_MATCH_RDTIMEH) {
+		u64 guest_time = kvm_riscv_read_guest_time(vcpu);
+		SET_RD(insn, &vcpu->arch.guest_context, (u32)(guest_time >> 32));
+		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+		return 1;
+	}
+#endif
+	/* Not an rdtime/rdtimeh instruction; let the caller keep decoding. */
+	return 0;
+}
+
static int system_opcode_insn(struct kvm_vcpu *vcpu,
struct kvm_run *run,
ulong insn)
@@ -154,6 +186,9 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu,
return 1;
}
+ if (system_opcode_insn_rdtime(vcpu, run, insn))
+ return 1;
+
return truly_illegal_insn(vcpu, run, insn);
}
@@ -49,6 +49,11 @@ static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
return kvm_riscv_scale_time(gt, host_time) + gt->time_delta;
}
+/*
+ * Return the guest's current time value: the (optionally scaled) host
+ * time plus the VM-wide time_delta, as computed by
+ * kvm_riscv_current_cycles() over the per-VM guest timer state.
+ */
+u64 kvm_riscv_read_guest_time(struct kvm_vcpu *vcpu)
+{
+	return kvm_riscv_current_cycles(&vcpu->kvm->arch.timer);
+}
+
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
struct kvm_guest_timer *gt,
struct kvm_vcpu_timer *t)
@@ -241,6 +246,11 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#endif
+
+ if (kvm_riscv_need_scale(gt))
+ csr_clear(CSR_HCOUNTEREN, 1UL << CE_TM);
+ else
+ csr_set(CSR_HCOUNTEREN, 1UL << CE_TM);
}
int kvm_riscv_guest_timer_init(struct kvm *kvm)