@@ -14,7 +14,6 @@
 #define RISCV_PMU_LEGACY_CYCLE		0
 #define RISCV_PMU_LEGACY_INSTRET	1
-#define RISCV_PMU_LEGACY_NUM_CTR	2

 static bool pmu_init_done;
@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
 {
 	pr_info("Legacy PMU implementation is available\n");

-	pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
+	pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
+		BIT(RISCV_PMU_LEGACY_INSTRET);
 	pmu->ctr_start = pmu_legacy_ctr_start;
 	pmu->ctr_stop = NULL;
 	pmu->event_map = pmu_legacy_event_map;
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -265,7 +265,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	struct sbiret ret;
 	int idx;
 	uint64_t cbase = 0;
-	uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
 	unsigned long cflags = 0;

 	if (event->attr.exclude_kernel)
@@ -274,8 +273,8 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 		cflags |= SBI_PMU_CFG_FLAG_SET_UINH;

 	/* retrieve the available counter index */
-	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
-			cflags, hwc->event_base, hwc->config, 0);
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+			rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
 	if (ret.error) {
 		pr_debug("Not able to find a counter for event %lx config %llx\n",
 			hwc->event_base, hwc->config);
@@ -283,7 +282,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	}

 	idx = ret.value;
-	if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
+	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
 		return -ENOENT;

 	/* Additional sanity check for the counter id */
@@ -447,7 +446,7 @@ static int pmu_sbi_find_num_ctrs(void)
 		return sbi_err_map_linux_errno(ret.error);
 }

-static int pmu_sbi_get_ctrinfo(int nctr)
+static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
 {
 	struct sbiret ret;
 	int i, num_hw_ctr = 0, num_fw_ctr = 0;
@@ -462,6 +461,9 @@ static int pmu_sbi_get_ctrinfo(int nctr)
 		if (ret.error)
 			/* The logical counter ids are not expected to be contiguous */
 			continue;
+
+		*mask |= BIT(i);
+
 		cinfo.value = ret.value;
 		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
 			num_fw_ctr++;
@@ -482,7 +484,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
 	 * which may include counters that are not enabled yet.
 	 */
 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
-		  0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
+		  0, pmu->cmask, 0, 0, 0, 0);
 }

 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -696,8 +698,9 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
 static int pmu_sbi_device_probe(struct platform_device *pdev)
 {
 	struct riscv_pmu *pmu = NULL;
-	int num_counters;
+	unsigned long cmask = 0;
 	int ret = -ENODEV;
+	int num_counters;

 	pr_info("SBI PMU extension is available\n");
 	pmu = riscv_pmu_alloc();
@@ -711,7 +714,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	}

 	/* cache all the information about counters now */
-	if (pmu_sbi_get_ctrinfo(num_counters))
+	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
 		goto out_free;

 	ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -720,7 +723,8 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
 	}
-	pmu->num_counters = num_counters;
+
+	pmu->cmask = cmask;
 	pmu->ctr_start = pmu_sbi_ctr_start;
 	pmu->ctr_stop = pmu_sbi_ctr_stop;
 	pmu->event_map = pmu_sbi_event_map;
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -45,7 +45,7 @@ struct riscv_pmu {
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);

-	int		num_counters;
+	unsigned long	cmask;
 	u64		(*ctr_read)(struct perf_event *event);
 	int		(*ctr_get_idx)(struct perf_event *event);
 	int		(*ctr_get_width)(int idx);
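
The switch from a counter count to a counter mask exists because the SBI firmware may expose a sparse set of counter ids, as noted in the pmu_sbi_get_ctrinfo() comment above. Below is a minimal standalone sketch of that idea, not kernel code: probe_counter() and MAX_CTRS are invented for illustration, and plain shift/mask expressions stand in for the kernel's BIT() and test_bit() helpers.

/*
 * Standalone illustration: why a bitmask of valid counter ids works
 * where a plain count does not when the id space has holes.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CTRS 64

/* Pretend firmware reports counters 0, 1, 3 and 5 as usable; 2 and 4 are holes. */
static bool probe_counter(int idx)
{
	return idx == 0 || idx == 1 || idx == 3 || idx == 5;
}

int main(void)
{
	unsigned long cmask = 0;
	int idx;

	/* Build the mask from the probed counters, one bit per valid id. */
	for (idx = 0; idx < MAX_CTRS; idx++)
		if (probe_counter(idx))
			cmask |= 1UL << idx;

	/*
	 * Membership test: a plain upper-bound check against a counter
	 * count cannot tell the holes at ids 2 and 4 apart from usable
	 * counters, while the mask lookup can.
	 */
	for (idx = 0; idx < 8; idx++)
		printf("counter %d: %s\n", idx,
		       (cmask & (1UL << idx)) ? "usable" : "not usable");

	return 0;
}

With a sparse id space, the mask lookup stays correct, whereas any "idx >= num_counters" style bound check either admits holes or rejects valid high-numbered counters.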