diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -746,6 +746,11 @@ static inline bool isar_feature_aa64_ecv_traps(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 0;
}
+static inline bool isar_feature_aa64_ecv(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 1;
+}
+
static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -453,6 +453,7 @@ typedef struct CPUArchState {
uint64_t c14_cntkctl; /* Timer Control register */
uint64_t cnthctl_el2; /* Counter/Timer Hyp Control register */
uint64_t cntvoff_el2; /* Counter Virtual Offset register */
+ uint64_t cntpoff_el2; /* Counter Physical Offset register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
uint32_t c15_cpar; /* XScale Coprocessor Access Register */
uint32_t c15_ticonfig; /* TI925T configuration byte. */
diff --git a/target/arm/helper.c b/target/arm/helper.c
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1923,6 +1923,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_rme, cpu)) {
valid_mask |= SCR_NSE | SCR_GPF;
}
+ if (cpu_isar_feature(aa64_ecv, cpu)) {
+ valid_mask |= SCR_ECVEN;
+ }
} else {
valid_mask &= ~(SCR_RW | SCR_ST);
if (cpu_isar_feature(aa32_ras, cpu)) {
@@ -2682,6 +2685,25 @@ void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
gt_update_irq(cpu, GTIMER_PHYS);
}
+static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
+{
+ if ((env->cp15.scr_el3 & SCR_ECVEN) &&
+ FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
+ arm_is_el2_enabled(env) &&
+ (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ return env->cp15.cntpoff_el2;
+ }
+ return 0;
+}
+
+static uint64_t gt_phys_cnt_offset(CPUARMState *env)
+{
+ if (arm_current_el(env) >= 2) {
+ return 0;
+ }
+ return gt_phys_raw_cnt_offset(env);
+}
+
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
@@ -2692,7 +2714,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
* reset timer to when ISTATUS next has to change
*/
uint64_t offset = timeridx == GTIMER_VIRT ?
- cpu->env.cp15.cntvoff_el2 : 0;
+ cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
uint64_t count = gt_get_countervalue(&cpu->env);
/* Note that this must be unsigned 64 bit arithmetic: */
int istatus = count - offset >= gt->cval;
@@ -2755,7 +2777,7 @@ static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- return gt_get_countervalue(env);
+ return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
}
static uint64_t gt_virt_cnt_offset(CPUARMState *env)
@@ -2804,6 +2826,9 @@ static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
case GTIMER_HYPVIRT:
offset = gt_virt_cnt_offset(env);
break;
+ case GTIMER_PHYS:
+ offset = gt_phys_cnt_offset(env);
+ break;
}
return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
@@ -2821,6 +2846,9 @@ static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
case GTIMER_HYPVIRT:
offset = gt_virt_cnt_offset(env);
break;
+ case GTIMER_PHYS:
+ offset = gt_phys_cnt_offset(env);
+ break;
}
trace_arm_gt_tval_write(timeridx, value);
@@ -3000,6 +3028,9 @@ static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
R_CNTHCTL_EL1NVVCT_MASK |
R_CNTHCTL_EVNTIS_MASK;
}
+ if (cpu_isar_feature(aa64_ecv, cpu)) {
+ valid_mask |= R_CNTHCTL_ECV_MASK;
+ }
/* Clear RES0 bits */
value &= valid_mask;
@@ -3417,6 +3448,34 @@ static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
},
};
+static CPAccessResult gt_cntpoff_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 2 && !(env->cp15.scr_el3 & SCR_ECVEN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ trace_arm_gt_cntpoff_write(value);
+ raw_write(env, ri, value);
+ gt_recalc_timer(cpu, GTIMER_PHYS);
+}
+
+static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
+ .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
+ .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
+ .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
+ .nv2_redirect_offset = 0x1a8,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
+};
#else
/*
@@ -9301,6 +9360,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
}
+#ifndef CONFIG_USER_ONLY
+ if (cpu_isar_feature(aa64_ecv, cpu)) {
+ define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
+ }
+#endif
if (arm_feature(env, ARM_FEATURE_VAPA)) {
ARMCPRegInfo vapa_cp_reginfo[] = {
{ .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
diff --git a/target/arm/trace-events b/target/arm/trace-events
--- a/target/arm/trace-events
+++ b/target/arm/trace-events
@@ -8,6 +8,7 @@ arm_gt_tval_write(int timer, uint64_t value) "gt_tval_write: timer %d value 0x%"
arm_gt_ctl_write(int timer, uint64_t value) "gt_ctl_write: timer %d value 0x%" PRIx64
arm_gt_imask_toggle(int timer) "gt_ctl_write: timer %d IMASK toggle"
arm_gt_cntvoff_write(uint64_t value) "gt_cntvoff_write: value 0x%" PRIx64
+arm_gt_cntpoff_write(uint64_t value) "gt_cntpoff_write: value 0x%" PRIx64
arm_gt_update_irq(int timer, int irqstate) "gt_update_irq: timer %d irqstate %d"
# kvm.c
When ID_AA64MMFR0_EL1.ECV is 0b0010, a new register CNTPOFF_EL2 is
implemented. This is similar to the existing CNTVOFF_EL2, except that
it controls a hypervisor-adjustable offset made to the physical counter
and timer.

Implement the handling for this register, which includes control/trap
bits in SCR_EL3 and CNTHCTL_EL2.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-features.h |  5 +++
 target/arm/cpu.h          |  1 +
 target/arm/helper.c       | 68 +++++++++++++++++++++++++++++++++++++--
 target/arm/trace-events   |  1 +
 4 files changed, 73 insertions(+), 2 deletions(-)
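As a rough sketch of the semantics the patch implements (illustrative
only, not part of the patch; the EcvModel struct and function names
below are invented for the example), the offset seen by an EL0 or EL1
read of the physical counter can be modelled like this:

#include <stdbool.h>
#include <stdint.h>

/*
 * Toy model of the FEAT_ECV physical counter offset: CNTPOFF_EL2 is
 * subtracted from the system count for EL0/EL1 readers when
 * SCR_EL3.ECVEN and CNTHCTL_EL2.ECV are both set, EL2 is enabled,
 * and the CPU is not running in an E2H+TGE "host" regime.
 */
typedef struct {
    uint64_t system_count;  /* raw system counter value */
    uint64_t cntpoff_el2;   /* hypervisor-programmed physical offset */
    bool scr_ecven;         /* SCR_EL3.ECVEN */
    bool cnthctl_ecv;       /* CNTHCTL_EL2.ECV */
    bool el2_enabled;       /* EL2 exists and is enabled */
    bool e2h_tge;           /* HCR_EL2.E2H and HCR_EL2.TGE both set */
    int current_el;         /* exception level performing the read */
} EcvModel;

static uint64_t ecv_phys_offset(const EcvModel *m)
{
    if (m->scr_ecven && m->cnthctl_ecv && m->el2_enabled && !m->e2h_tge) {
        return m->cntpoff_el2;
    }
    return 0;
}

/* What a CNTPCT_EL0 read returns; EL2 and EL3 always see the raw count. */
static uint64_t ecv_read_cntpct(const EcvModel *m)
{
    if (m->current_el >= 2) {
        return m->system_count;
    }
    return m->system_count - ecv_phys_offset(m);
}

If either SCR_EL3.ECVEN or CNTHCTL_EL2.ECV is clear, the offset
collapses to zero and EL0/EL1 see the same count as EL2/EL3, matching
the pre-ECV behaviour; in the patch above the same offset also feeds
the timer comparison in gt_recalc_timer() and the TimerValue views in
gt_tval_read()/gt_tval_write().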