@@ -42,9 +42,22 @@
static DEFINE_IDA(ddr_ida);
+/* DDR Perf hardware feature */
+#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+
+struct fsl_ddr_devtype_data {
+ unsigned int quirks; /* quirks needed for different DDR Perf core */
+};
+
+static const struct fsl_ddr_devtype_data imx8_devtype_data;
+
+static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_FILTER,
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
- { .compatible = "fsl,imx8-ddr-pmu",},
- { .compatible = "fsl,imx8m-ddr-pmu",},
+ { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
+ { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -58,6 +71,7 @@ struct ddr_pmu {
struct perf_event *events[NUM_COUNTERS];
int active_events;
enum cpuhp_state cpuhp_state;
+ const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
};
@@ -129,6 +143,8 @@ static struct attribute *ddr_perf_events_attrs[] = {
IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+ IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
+ IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
NULL,
};
@@ -138,9 +154,11 @@ static struct attribute_group ddr_perf_events_attr_group = {
};
PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(axi_id, "config1:0-31");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_axi_id.attr,
NULL,
};
@@ -190,6 +208,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
}
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+ return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+ return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+ struct perf_event *b)
+{
+ if (!ddr_perf_is_filtered(a))
+ return true;
+ if (!ddr_perf_is_filtered(b))
+ return true;
+ return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
static int ddr_perf_event_init(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -216,6 +254,15 @@ static int ddr_perf_event_init(struct perf_event *event)
!is_software_event(event->group_leader))
return -EINVAL;
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+ if (!ddr_perf_filters_compatible(event, event->group_leader))
+ return -EINVAL;
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (!ddr_perf_filters_compatible(event, sibling))
+ return -EINVAL;
+ }
+ }
+
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
@@ -289,6 +336,18 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
int counter;
int cfg = event->attr.config;
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+ int i;
+
+ for (i = 1; i < NUM_COUNTERS; i++) {
+ if (pmu->events[i] &&
+ !ddr_perf_filters_compatible(event, pmu->events[i]))
+ return -EINVAL;
+ }
+
+ writel(event->attr.config1, pmu->base + COUNTER_DPCR1);
+ }
+
counter = ddr_perf_alloc_counter(pmu, cfg);
if (counter < 0) {
dev_dbg(pmu->dev, "There are not enough counters\n");
@@ -473,6 +532,8 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
+ pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
pmu->cpu = raw_smp_processor_id();
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DDR_CPUHP_CB_NAME,
AXI filtering is used by CSV modes 0x41 and 0x42 to count reads or
writes with an ARID or AWID matching the filter setting. Granularity is
at subsystem level. The implementation does not allow filtering between
masters within a subsystem.

The filter is defined with two configuration parameters:
--AXI_ID defines the AxID matching value
--AXI_MASKING defines which bits of AxID are meaningful for the matching
    0: corresponding bit is masked
    1: corresponding bit is not masked, i.e. used to do the matching

When the non-masked bits match the corresponding AXI_ID bits, the
counter is incremented. This filter allows counting read or write
accesses from a single subsystem or from multiple subsystems.

The perf counter is incremented if

  (AxID & AXI_MASKING) == (AXI_ID & AXI_MASKING)

AXI_ID and AXI_MASKING are mapped onto the DPCR1 register of the
performance counter. Because this filter is shared between counters, the
read and write AXI ID filters must write the same value to DPCR1 when
both are specified at the same time, e.g.:

  perf stat -a -e imx8_ddr0/axid-read,axi_id=0xMMMMDDDD/,imx8_ddr0/axid-write,axi_id=0xMMMMDDDD/ cmd

  MMMM: AXI_MASKING
  DDDD: AXI_ID

  perf stat -a -e imx8_ddr0/axid-read,axi_id=0xffff0001/ cmd

which monitors ARID=1.

ChangeLog:
V1 -> V2:
 * Add an error log if the user specifies the read and write AXI ID
   filters at the same time.
 * Use of_device_get_match_data() instead of of_match_device(), and
   remove the check of the return value.
V2 -> V3:
 * Move the AXI ID check to event_add().
 * Add support for the same value of axi_id.
V3 -> V4:
 * Move the AXI ID check to event_init().
V4 -> V5:
 * Reject the event group in event_init() if the AXI ID is not
   consistent.
V5 -> V6:
 * Rename the events: axi-id-read -> axid-read; axi-id-write -> axid-write.
 * Add another helper: ddr_perf_filters_compatible().
 * Drop the dev_dbg().

Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
---
 drivers/perf/fsl_imx8_ddr_perf.c | 65 +++++++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 2 deletions(-)
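
Side note (not part of the patch): the matching rule can be sketched in
stand-alone C. The helper names here are illustrative only, and the
0xMMMMDDDD split (AXI_MASKING in the upper 16 bits, AXI_ID in the lower
16) is assumed from the perf examples in the commit message; none of
this is driver code.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Split a config1/DPCR1 value per the 0xMMMMDDDD convention. */
  static uint32_t filter_mask(uint32_t dpcr1) { return dpcr1 >> 16; }
  static uint32_t filter_id(uint32_t dpcr1)   { return dpcr1 & 0xffff; }

  /* A transaction is counted when all non-masked AxID bits match. */
  static bool axid_matches(uint32_t axid, uint32_t dpcr1)
  {
          uint32_t mask = filter_mask(dpcr1);

          return (axid & mask) == (filter_id(dpcr1) & mask);
  }

  int main(void)
  {
          uint32_t dpcr1 = 0xffff0001; /* mask all 16 bits, match AxID == 1 */

          printf("AxID 1: %s\n", axid_matches(1, dpcr1) ? "counted" : "ignored");
          printf("AxID 2: %s\n", axid_matches(2, dpcr1) ? "counted" : "ignored");
          return 0;
  }

In the driver itself the comparison is done by the hardware:
ddr_perf_event_add() only writes event->attr.config1 into DPCR1, and
ddr_perf_filters_compatible() ensures every active filtered counter
agrees on that single shared value.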