@@ -17,6 +17,7 @@
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
+#include <linux/riscv_sse.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
@@ -668,10 +669,10 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
}
}
-static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+static irqreturn_t pmu_sbi_ovf_handler(struct cpu_hw_events *cpu_hw_evt,
+ struct pt_regs *regs, bool from_sse)
{
struct perf_sample_data data;
- struct pt_regs *regs;
struct hw_perf_event *hw_evt;
union sbi_pmu_ctr_info *info;
int lidx, hidx, fidx;
@@ -679,7 +680,6 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
struct perf_event *event;
unsigned long overflow;
unsigned long overflowed_ctrs = 0;
- struct cpu_hw_events *cpu_hw_evt = dev;
u64 start_clock = sched_clock();
if (WARN_ON_ONCE(!cpu_hw_evt))
@@ -688,13 +688,15 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* Firmware counter don't support overflow yet */
fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
if (fidx == RISCV_MAX_COUNTERS) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ if (!from_sse)
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
return IRQ_NONE;
}
event = cpu_hw_evt->events[fidx];
if (!event) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ if (!from_sse)
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
return IRQ_NONE;
}
@@ -706,16 +708,16 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/*
* Overflow interrupt pending bit should only be cleared after stopping
- * all the counters to avoid any race condition.
+ * all the counters to avoid any race condition. When using SSE, the
+ * interrupt is cleared when stopping the counters.
*/
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ if (!from_sse)
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
/* No overflow bit is set */
if (!overflow)
return IRQ_NONE;
- regs = get_irq_regs();
-
for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
struct perf_event *event = cpu_hw_evt->events[lidx];
@@ -761,6 +763,22 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
return IRQ_HANDLED;
}
+static irqreturn_t pmu_sbi_ovf_irq_handler(int irq, void *dev)
+{
+ return pmu_sbi_ovf_handler(dev, get_irq_regs(), false);
+}
+
+static int pmu_sbi_ovf_sse_handler(uint32_t evt, void *arg,
+ struct pt_regs *regs)
+{
+ struct cpu_hw_events __percpu *hw_events = arg;
+ struct cpu_hw_events *hw_event = raw_cpu_ptr(hw_events);
+
+ pmu_sbi_ovf_handler(hw_event, regs, true);
+
+ return 0;
+}
+
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
@@ -804,9 +822,17 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
int ret;
+ struct sse_event *evt;
struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
struct irq_domain *domain = NULL;
+ evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU, 0,
+ pmu_sbi_ovf_sse_handler, hw_events);
+ if (!IS_ERR(evt)) {
+ sse_event_enable(evt);
+ return 0;
+ }
+
if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
riscv_pmu_irq_num = RV_IRQ_PMU;
riscv_pmu_use_irq = true;
@@ -834,7 +860,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
return -ENODEV;
}
- ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
+ ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_irq_handler, "riscv-pmu", hw_events);
if (ret) {
pr_err("registering percpu irq failed [%d]\n", ret);
return ret;
In order to use SSE within PMU drivers, register an SSE handler for the
local PMU event. Reuse the existing overflow IRQ handler and pass the
appropriate pt_regs.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
---
 drivers/perf/riscv_pmu_sbi.c | 46 ++++++++++++++++++++++++++++--------
 1 file changed, 36 insertions(+), 10 deletions(-)
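
For readers less familiar with the SSE API, the registration flow added to
pmu_sbi_setup_irqs() boils down to the pattern below. This is only a minimal
sketch built from the calls visible in this patch (sse_event_register(),
sse_event_enable(), request_percpu_irq()); the wrapper function name is
illustrative and the IRQ-number lookup done by the real driver is omitted.

	#include <linux/interrupt.h>
	#include <linux/riscv_sse.h>

	/* Sketch only: prefer SSE delivery, fall back to the per-CPU IRQ. */
	static int pmu_overflow_delivery_setup(struct cpu_hw_events __percpu *hw_events)
	{
		struct sse_event *evt;

		/* Ask the SBI firmware for the local PMU overflow event. */
		evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU, 0,
					 pmu_sbi_ovf_sse_handler, hw_events);
		if (!IS_ERR(evt)) {
			sse_event_enable(evt);
			return 0;
		}

		/* No SSE support in firmware: keep the overflow interrupt path. */
		return request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_irq_handler,
					  "riscv-pmu", hw_events);
	}

Both paths end up in pmu_sbi_ovf_handler(); the only difference is who
provides the pt_regs and whether the SIP bit still needs to be cleared.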