Diffstat (limited to 'drivers/hwtracing/coresight/coresight-tmc-etf.c')
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 43
1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 2527b5d3b65e..23b7ff00af5c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -280,7 +280,6 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
u32 mode, void *data)
{
int ret;
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
switch (mode) {
case CS_MODE_SYSFS:
@@ -298,7 +297,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
if (ret)
return ret;
- dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
return 0;
}
@@ -328,7 +327,7 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
return 0;
}
@@ -351,7 +350,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
- dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
@@ -371,19 +370,17 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- int node, cpu = event->cpu;
+ int node;
struct cs_buffers *buf;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
+ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
/* Allocate memory structure for interaction with Perf */
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
@@ -477,9 +474,11 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
/*
* The TMC RAM buffer may be bigger than the space available in the
* perf ring buffer (handle->size). If so advance the RRP so that we
- * get the latest trace data.
+ * get the latest trace data. In snapshot mode none of that matters
+ * since we are expected to clobber stale data in favour of the latest
+ * traces.
*/
- if (to_read > handle->size) {
+ if (!buf->snapshot && to_read > handle->size) {
u32 mask = 0;
/*
@@ -516,7 +515,13 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
lost = true;
}
- if (lost)
+ /*
+ * Don't set the TRUNCATED flag in snapshot mode because 1) the
+ * captured buffer is expected to be truncated and 2) a full buffer
+ * prevents the event from being re-enabled by the perf core,
+ * resulting in stale data being sent to user space.
+ */
+ if (!buf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
cur = buf->cur;
@@ -542,11 +547,15 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
}
}
- /* In snapshot mode we have to update the head */
- if (buf->snapshot) {
- handle->head = (cur * PAGE_SIZE) + offset;
- to_read = buf->nr_pages << PAGE_SHIFT;
- }
+ /*
+ * In snapshot mode we simply increment the head by the number of bytes
+ * that were written. User space function cs_etm_find_snapshot() will
+ * figure out how many bytes to get from the AUX buffer based on the
+ * position of the head.
+ */
+ if (buf->snapshot)
+ handle->head += to_read;
+
CS_LOCK(drvdata->base);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
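For readers following the two snapshot-related hunks, here is a minimal user-space C sketch of the policy they implement, assuming only what is visible in the patch (handle->size as the space left in the perf ring buffer, handle->head as the AUX head, buf->snapshot as the mode flag). It is an illustration of the logic, not driver code, and none of the names below are kernel API.

#include <stdio.h>

/*
 * Toy model of the update policy: 'snapshot' mirrors buf->snapshot,
 * 'handle_size' mirrors handle->size and '*head' mirrors handle->head.
 */
static unsigned long model_update(int snapshot, unsigned long to_read,
                                  unsigned long handle_size,
                                  unsigned long *head)
{
        /*
         * Non-snapshot: only as much data as the perf ring buffer can
         * hold is kept; the driver would also flag the record as
         * TRUNCATED in that case.
         */
        if (!snapshot && to_read > handle_size)
                to_read = handle_size;

        /*
         * Snapshot: advance the head by the number of bytes written and
         * leave it to user space (cs_etm_find_snapshot()) to decide how
         * much to copy out.
         */
        if (snapshot)
                *head += to_read;

        return to_read;
}

int main(void)
{
        unsigned long head = 0;

        printf("normal:   kept %lu bytes\n", model_update(0, 8192, 4096, &head));
        printf("snapshot: kept %lu bytes, head = %lu\n",
               model_update(1, 8192, 4096, &head), head);
        return 0;
}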