author     Pekka Paalanen <pq@iki.fi>  2009-01-06 13:57:11 +0200
committer  Ingo Molnar <mingo@elte.hu>  2009-01-11 04:01:30 +0100
commit     173ed24ee2d64f5de28654eb456ec1ee18a142e5 (patch)
tree       0e47a59eca31c6d09f1bfe5496c3f7b03e15f16a  /kernel/trace/trace_mmiotrace.c
parent     trace: mmiotrace to the tracer menu in Kconfig (diff)
mmiotrace: count events lost due to not recording
Impact: enhances lost events counting in mmiotrace

The tracing framework, or the ring buffer facility it uses, has a
switch to stop recording data. When recording is off, the trace events
will be lost. The framework does not count these, so mmiotrace has to
count them itself.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
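[Annotation] For readers outside the kernel tree, here is a minimal user-space
sketch of the counting pattern this patch introduces, written with C11
<stdatomic.h> rather than the kernel's <asm/atomic.h>. All names here
(try_record, record_event, report_lost) are hypothetical stand-ins:
try_record() plays the role of ring_buffer_lock_reserve() failing while
recording is switched off. A second sketch after the diff shows why the
drain must be a single atomic exchange.

/*
 * Illustrative sketch only; not the kernel code. Names are invented.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong dropped_count;

static bool try_record(int value)
{
	(void)value;
	return false;		/* pretend recording is switched off */
}

static void record_event(int value)
{
	if (!try_record(value))
		atomic_fetch_add(&dropped_count, 1);	/* count the lost event */
}

static void report_lost(void)
{
	/* Read and reset in one step, as the patch does with atomic_xchg(). */
	unsigned long lost = atomic_exchange(&dropped_count, 0);

	if (lost)
		printf("lost %lu events\n", lost);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		record_event(i);
	report_lost();		/* prints "lost 5 events" */
	return 0;
}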
Diffstat (limited to 'kernel/trace/trace_mmiotrace.c')
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fcec59ff2355..621c8c3f3139 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -20,6 +21,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -122,11 +124,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -308,8 +310,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -336,8 +340,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
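
[Annotation] Design note: after this patch, count_overruns() reports the sum of
two loss sources: reservation failures tallied in dropped_count while
recording is off, and ring buffer overwrites reported by
ring_buffer_overruns(). The drain of dropped_count uses atomic_xchg()
rather than a plain read followed by a store of zero. A hedged sketch of
the race the exchange avoids, using the same C11-atomics stand-ins as
the sketch above (illustrative names, not kernel APIs):

#include <stdatomic.h>

/*
 * Racy drain: an atomic_fetch_add() performed by a tracer between the
 * load and the store below is wiped out without ever being reported.
 */
unsigned long racy_drain(atomic_ulong *ctr)
{
	unsigned long cnt = atomic_load(ctr);	/* reader sees N */
	/* ... writer increments here: counter is now N + 1 ... */
	atomic_store(ctr, 0);			/* N + 1 -> 0: one event lost */
	return cnt;
}

/*
 * Safe drain, mirroring atomic_xchg(&dropped_count, 0) in the patch:
 * read and reset happen as one indivisible operation, so every
 * increment is reported exactly once.
 */
unsigned long safe_drain(atomic_ulong *ctr)
{
	return atomic_exchange(ctr, 0);
}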