@@ -17,17 +17,26 @@ typedef unsigned long cycles_t;
#ifdef CONFIG_64BIT
static inline cycles_t get_cycles(void)
{
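+	/* Prefer the Zicntr time CSR over the MMIO CLINT time value. */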
+	if (riscv_has_extension_likely(RISCV_ISA_EXT_ZICNTR))
+		return csr_read(CSR_TIME);
+
return readq_relaxed(clint_time_val);
}
#else /* !CONFIG_64BIT */
static inline u32 get_cycles(void)
{
+	if (riscv_has_extension_likely(RISCV_ISA_EXT_ZICNTR))
+		return csr_read(CSR_TIME);
+
return readl_relaxed(((u32 *)clint_time_val));
}
#define get_cycles get_cycles

static inline u32 get_cycles_hi(void)
{
+	if (riscv_has_extension_likely(RISCV_ISA_EXT_ZICNTR))
+		return csr_read(CSR_TIMEH);
+
return readl_relaxed(((u32 *)clint_time_val) + 1);
}
#define get_cycles_hi get_cycles_hi
@@ -40,7 +49,8 @@ static inline u32 get_cycles_hi(void)
*/
static inline unsigned long random_get_entropy(void)
{
- if (unlikely(clint_time_val == NULL))
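+	/* The MMIO fallback only matters without the Zicntr time CSR. */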
+	if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZICNTR) &&
+	    unlikely(clint_time_val == NULL))
return random_get_entropy_fallback();
return get_cycles();
}
@@ -39,6 +39,7 @@ static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;
+static bool is_c900_clint;

#ifdef CONFIG_RISCV_M_MODE
u64 __iomem *clint_time_val;
@@ -79,6 +80,9 @@ static void clint_ipi_interrupt(struct irq_desc *desc)
#ifdef CONFIG_64BIT
static u64 notrace clint_get_cycles64(void)
{
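+	/* Prefer the Zicntr time CSR; fall back to reading the CLINT. */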
+	if (riscv_has_extension_likely(RISCV_ISA_EXT_ZICNTR))
+		return csr_read(CSR_TIME);
+
return clint_get_cycles();
}
#else /* CONFIG_64BIT */
@@ -86,10 +90,17 @@ static u64 notrace clint_get_cycles64(void)
{
u32 hi, lo;

-	do {
-		hi = clint_get_cycles_hi();
-		lo = clint_get_cycles();
-	} while (hi != clint_get_cycles_hi());
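+	/* Re-read the high word to catch a rollover between the two reads. */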
+	if (riscv_has_extension_likely(RISCV_ISA_EXT_ZICNTR)) {
+		do {
+			hi = csr_read(CSR_TIMEH);
+			lo = csr_read(CSR_TIME);
+		} while (hi != csr_read(CSR_TIMEH));
+	} else {
+		do {
+			hi = clint_get_cycles_hi();
+			lo = clint_get_cycles();
+		} while (hi != clint_get_cycles_hi());
+	}

return ((u64)hi << 32) | lo;
}
@@ -119,6 +130,19 @@ static int clint_clock_next_event(unsigned long delta,
return 0;
}

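+/*
+ * T-Head C900 CLINTs program the 64-bit mtimecmp register with two
+ * 32-bit writes, low word first, rather than one 64-bit access.
+ */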
+static int c900_clint_clock_next_event(unsigned long delta,
+					struct clock_event_device *ce)
+{
+	void __iomem *r = clint_timer_cmp +
+			  cpuid_to_hartid_map(smp_processor_id());
+	u64 val = clint_get_cycles64() + delta;
+
+	csr_set(CSR_IE, IE_TIE);
+	writel_relaxed(val, r);
+	writel_relaxed(val >> 32, r + 4);
+	return 0;
+}
+
static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
.name = "clint_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -130,6 +154,9 @@ static int clint_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);

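+	/* C900 CLINTs use the split 32-bit mtimecmp programming path. */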
+	if (is_c900_clint)
+		ce->set_next_event = c900_clint_clock_next_event;
+
ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, clint_timer_freq, 100, ULONG_MAX);

@@ -161,7 +188,7 @@ static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}

-static int __init clint_timer_init_dt(struct device_node *np)
+static int __init clint_timer_init(struct device_node *np)
{
int rc;
u32 i, nr_irqs;
@@ -273,5 +300,18 @@ static int __init clint_timer_init_dt(struct device_node *np)
return rc;
}

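+/* Thin DT init wrappers that record which CLINT flavour was matched. */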
+static int __init clint_timer_init_dt(struct device_node *np)
+{
+ is_c900_clint = false;
+ return clint_timer_init(np);
+}
+
+static int __init c900_clint_timer_init_dt(struct device_node *np)
+{
+ is_c900_clint = true;
+ return clint_timer_init(np);
+}
+
TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
+TIMER_OF_DECLARE(clint_timer2, "thead,c900-clint", c900_clint_timer_init_dt);