From 918dc87b746e0114eb64d6e478da7f370e266d5a Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Fri, 14 Jul 2023 11:48:31 -0600
Subject: drivers/perf: Explicitly include correct DT includes

The DT of_device.h and of_platform.h date back to the separate
of_platform_bus_type before it was merged into the regular platform bus.
As part of that merge prepping Arm DT support 13 years ago, they
"temporarily" include each other. They also include platform_device.h
and of.h. As a result, there is a pretty much random mix of those
include files used throughout the tree.

In order to detangle these headers and replace the implicit includes
with struct declarations, users need to explicitly include the correct
headers.

Signed-off-by: Rob Herring
Link: https://lore.kernel.org/r/20230714174832.4061752-1-robh@kernel.org
Signed-off-by: Will Deacon
---
 drivers/perf/fsl_imx8_ddr_perf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers/perf/fsl_imx8_ddr_perf.c')

diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 5222ba1e79d0..1cb3861ab0e0 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -10,10 +10,9 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
+#include
 #include
 
 #define COUNTER_CNTL		0x0
-- 
cgit v1.2.3-59-g8ed1b

From e89ecd8368860bf05437eabd07d292c316221cfc Mon Sep 17 00:00:00 2001
From: Xu Yang
Date: Fri, 11 Aug 2023 09:54:37 +0800
Subject: perf/imx_ddr: speed up overflow frequency of cycle

For i.MX8MP, we cannot ensure that the cycle counter overflows at least
4 times as often as the other events. Because the byte counters count
for any configured event, they will overflow more often, and if a byte
counter overflows, the related counters stop, since they share
COUNTER_CNTL. We can speed up the cycle counter's overflow frequency by
setting the counter parameter (CP) field of the cycle counter. This way
the byte counters do not stop counting while no interrupt has arrived,
and they can be fetched or updated on each cycle counter overflow
interrupt.

Because we initialize the CP field to shorten the counter0 overflow
time, the cycle counter starts counting from a fixed/base value each
time. We need to remove that base from the result as well, so the cycle
counter yields a precise result.
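A quick user-space sketch of the bias arithmetic (illustrative only, not
driver code): it assumes the 8-bit CP value is loaded into the top byte
of the 32-bit counter, which is what the 0x0FFFFFFF read-side mask added
below suggests.

  /* Illustrative only; the CP-to-top-byte placement is an assumption. */
  #include <stdint.h>
  #include <stdio.h>

  #define CYCLES_COUNTER_MASK 0x0FFFFFFFu    /* same constant the patch adds */

  int main(void)
  {
      uint32_t cp = 0xf0;                        /* value programmed into CNTL.CP */
      uint32_t start = cp << 24;                 /* biased start: 0xf0000000 */
      uint64_t period = 0x100000000ULL - start;  /* 2^28 cycles until overflow */

      printf("biased start value: 0x%08x\n", start);
      printf("overflow period   : %llu cycles (vs. 2^32 unbiased)\n",
             (unsigned long long)period);

      /* A read-back must strip the bias, as ddr_perf_event_update() now does. */
      uint32_t raw = start + 12345;              /* pretend raw counter read */
      printf("unbiased cycles   : %u\n", raw & CYCLES_COUNTER_MASK);
      return 0;
  }

With the bias in place the cycle counter overflows 16 times more often
(every 2^28 cycles instead of 2^32), so the byte counters can be fetched
or updated from the cycle counter overflow interrupt before they
overflow themselves.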
Signed-off-by: Xu Yang
Reviewed-by: Frank Li
Link: https://lore.kernel.org/r/20230811015438.1999307-1-xu.yang_2@nxp.com
Signed-off-by: Will Deacon
---
 drivers/perf/fsl_imx8_ddr_perf.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'drivers/perf/fsl_imx8_ddr_perf.c')

diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 1cb3861ab0e0..7d4e49565738 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -27,6 +27,8 @@
 #define CNTL_CLEAR_MASK		0xFFFFFFFD
 #define CNTL_OVER_MASK		0xFFFFFFFE
 
+#define CNTL_CP_SHIFT		16
+#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
 #define CNTL_CSV_SHIFT		24
 #define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)
 
@@ -34,6 +36,8 @@
 #define EVENT_CYCLES_COUNTER	0
 #define NUM_COUNTERS		4
 
+/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
+#define CYCLES_COUNTER_MASK	0x0FFFFFFF
 #define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
 
 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
@@ -426,6 +430,17 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
 		writel(0, pmu->base + reg);
 		val = CNTL_EN | CNTL_CLEAR;
 		val |= FIELD_PREP(CNTL_CSV_MASK, config);
+
+		/*
+		 * On i.MX8MP we need to bias the cycle counter to overflow more often.
+		 * We do this by initializing bits [23:16] of the counter value via the
+		 * COUNTER_CTRL Counter Parameter (CP) field.
+		 */
+		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
+			if (counter == EVENT_CYCLES_COUNTER)
+				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
+		}
+
 		writel(val, pmu->base + reg);
 	} else {
 		/* Disable counter */
@@ -465,6 +480,12 @@ static void ddr_perf_event_update(struct perf_event *event)
 	int ret;
 
 	new_raw_count = ddr_perf_read_counter(pmu, counter);
+	/* Remove the bias applied in ddr_perf_counter_enable(). */
+	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
+		if (counter == EVENT_CYCLES_COUNTER)
+			new_raw_count &= CYCLES_COUNTER_MASK;
+	}
+
 	local64_add(new_raw_count, &event->count);
 
 	/*
-- 
cgit v1.2.3-59-g8ed1b

From f4e2bd91ddf5e8543cbe7ad80b3fba3d2dc63fa3 Mon Sep 17 00:00:00 2001
From: Xu Yang
Date: Fri, 11 Aug 2023 09:54:38 +0800
Subject: perf/imx_ddr: don't enable counter0 if none of 4 counters are used

In the current driver, counter0 is enabled after ddr_perf_pmu_enable()
is called even though none of the 4 counters is used. This causes
counter0 to keep counting until ddr_perf_pmu_disable() is called. If
the PMU is never disabled, the PMU interrupt is asserted from time to
time because counter0 overflows and the irq handler clears it. That is
not the expected behavior. This patch stops enabling counter0 when none
of the 4 counters is in use.
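A minimal sketch of the reference-counting idea this patch switches to
(illustrative only; ddr_pmu_sketch, event_start() and event_stop() are
made-up names, and the bool merely stands in for enabling/disabling
counter0):

  #include <stdbool.h>
  #include <stdio.h>

  struct ddr_pmu_sketch {
      int active_counter;     /* number of currently started events */
      bool cycles_running;    /* stands in for counter0, the cycle counter */
  };

  static void event_start(struct ddr_pmu_sketch *p)
  {
      if (!p->active_counter++)       /* first active event: turn counter0 on */
          p->cycles_running = true;
  }

  static void event_stop(struct ddr_pmu_sketch *p)
  {
      if (!--p->active_counter)       /* last active event gone: turn counter0 off */
          p->cycles_running = false;
  }

  int main(void)
  {
      struct ddr_pmu_sketch p = { 0 };

      event_start(&p);        /* counter0 switched on with the first event */
      event_start(&p);        /* second event: counter0 stays on */
      event_stop(&p);
      event_stop(&p);         /* no events left: counter0 switched off again */

      printf("active_counter=%d cycles_running=%d\n",
             p.active_counter, p.cycles_running);
      return 0;
  }

Unlike gating counter0 from ddr_perf_pmu_enable()/ddr_perf_pmu_disable(),
this keeps counter0 off whenever no event is scheduled, so an idle PMU no
longer sees periodic counter0 overflow interrupts.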
Fixes: 9a66d36cc7ac ("drivers/perf: imx_ddr: Add DDR performance counter support to perf")
Signed-off-by: Xu Yang
Reviewed-by: Frank Li
Link: https://lore.kernel.org/r/20230811015438.1999307-2-xu.yang_2@nxp.com
Signed-off-by: Will Deacon
---
 drivers/perf/fsl_imx8_ddr_perf.c | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

(limited to 'drivers/perf/fsl_imx8_ddr_perf.c')

diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 7d4e49565738..92611c98120f 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -104,6 +104,7 @@ struct ddr_pmu {
 	const struct fsl_ddr_devtype_data *devtype_data;
 	int irq;
 	int id;
+	int active_counter;
 };
 
 static ssize_t ddr_perf_identifier_show(struct device *dev,
@@ -515,6 +516,10 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
 
 	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
 
+	if (!pmu->active_counter++)
+		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
+					EVENT_CYCLES_COUNTER, true);
+
 	hwc->state = 0;
 }
 
@@ -568,6 +573,10 @@ static void ddr_perf_event_stop(struct perf_event *event, int flags)
 	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
 	ddr_perf_event_update(event);
 
+	if (!--pmu->active_counter)
+		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
+					EVENT_CYCLES_COUNTER, false);
+
 	hwc->state |= PERF_HES_STOPPED;
 }
 
@@ -585,25 +594,10 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
 
 static void ddr_perf_pmu_enable(struct pmu *pmu)
 {
-	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
-
-	/* enable cycle counter if cycle is not active event list */
-	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
-		ddr_perf_counter_enable(ddr_pmu,
-					EVENT_CYCLES_ID,
-					EVENT_CYCLES_COUNTER,
-					true);
 }
 
 static void ddr_perf_pmu_disable(struct pmu *pmu)
 {
-	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
-
-	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
-		ddr_perf_counter_enable(ddr_pmu,
-					EVENT_CYCLES_ID,
-					EVENT_CYCLES_COUNTER,
-					false);
 }
 
 static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
-- 
cgit v1.2.3-59-g8ed1b