From 919d590f13c94c5465db723178e4b72200fd5f90 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 20 Nov 2012 10:51:02 +0100 Subject: perf symbols: Fix dso__fprintf() print statement Was ignoring the dso type (function vs. variable) and was therefore printing bogus information. Signed-off-by: Stephane Eranian Cc: Ingo Molnar Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20121120095101.GA5939@quad Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dso.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index d6d9a465acdb..be437850edc8 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -583,7 +583,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) if (dso->short_name != dso->long_name) ret += fprintf(fp, "%s, ", dso->long_name); ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], - dso->loaded ? "" : "NOT "); + dso__loaded(dso, type) ? "" : "NOT "); ret += dso__fprintf_buildid(dso, fp); ret += fprintf(fp, ")\n"); for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { -- cgit v1.2.3-59-g8ed1b From 752914208ada0d9ae0a6b3bf2906d43f3605832f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 15 Nov 2012 01:47:40 +0900 Subject: perf ui: Always compile error printing code It is used everywhere so always build it regardless of ui engine. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1352911664-24620-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 14 +++++--------- tools/perf/ui/util.c | 10 ++++++++++ tools/perf/util/debug.c | 22 ---------------------- tools/perf/util/debug.h | 33 ++------------------------------- 4 files changed, 17 insertions(+), 62 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 891bc77bdb2c..8fca560cf78e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -378,8 +378,11 @@ LIB_H += util/rblist.h LIB_H += util/intlist.h LIB_H += util/perf_regs.h LIB_H += util/unwind.h -LIB_H += ui/helpline.h LIB_H += util/vdso.h +LIB_H += ui/helpline.h +LIB_H += ui/progress.h +LIB_H += ui/util.h +LIB_H += ui/ui.h LIB_OBJS += $(OUTPUT)util/abspath.o LIB_OBJS += $(OUTPUT)util/alias.o @@ -453,6 +456,7 @@ LIB_OBJS += $(OUTPUT)util/stat.o LIB_OBJS += $(OUTPUT)ui/setup.o LIB_OBJS += $(OUTPUT)ui/helpline.o LIB_OBJS += $(OUTPUT)ui/progress.o +LIB_OBJS += $(OUTPUT)ui/util.o LIB_OBJS += $(OUTPUT)ui/hist.o LIB_OBJS += $(OUTPUT)ui/stdio/hist.o @@ -646,7 +650,6 @@ ifndef NO_NEWT LIB_OBJS += $(OUTPUT)ui/browsers/hists.o LIB_OBJS += $(OUTPUT)ui/browsers/map.o LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o - LIB_OBJS += $(OUTPUT)ui/util.o LIB_OBJS += $(OUTPUT)ui/tui/setup.o LIB_OBJS += $(OUTPUT)ui/tui/util.o LIB_OBJS += $(OUTPUT)ui/tui/helpline.o @@ -655,9 +658,6 @@ ifndef NO_NEWT LIB_H += ui/browsers/map.h LIB_H += ui/keysyms.h LIB_H += ui/libslang.h - LIB_H += ui/progress.h - LIB_H += ui/util.h - LIB_H += ui/ui.h endif endif @@ -677,10 +677,6 @@ ifndef NO_GTK2 LIB_OBJS += $(OUTPUT)ui/gtk/util.o LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o LIB_OBJS += $(OUTPUT)ui/gtk/progress.o - # Make sure that it'd be included only once. 
- ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),) - LIB_OBJS += $(OUTPUT)ui/util.o - endif endif endif diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c index 4f989774c8c6..3014a7cd5271 100644 --- a/tools/perf/ui/util.c +++ b/tools/perf/ui/util.c @@ -52,6 +52,16 @@ int ui__warning(const char *format, ...) return ret; } +int ui__error_paranoid(void) +{ + return ui__error("Permission error - are you root?\n" + "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" + " -1 - Not paranoid at all\n" + " 0 - Disallow raw tracepoint access for unpriv\n" + " 1 - Disallow cpu events for unpriv\n" + " 2 - Disallow kernel profiling for unpriv\n"); +} + /** * perf_error__register - Register error logging functions diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 03f830b48148..39861a2a7d18 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -49,28 +49,6 @@ int dump_printf(const char *fmt, ...) return ret; } -#if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT) -int ui__warning(const char *format, ...) -{ - va_list args; - - va_start(args, format); - vfprintf(stderr, format, args); - va_end(args); - return 0; -} -#endif - -int ui__error_paranoid(void) -{ - return ui__error("Permission error - are you root?\n" - "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" - " -1 - Not paranoid at all\n" - " 0 - Disallow raw tracepoint access for unpriv\n" - " 1 - Disallow cpu events for unpriv\n" - " 2 - Disallow kernel profiling for unpriv\n"); -} - void trace_event(union perf_event *event) { unsigned char *raw_event = (void *)event; diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 83e8d234af6b..6e2667fb8211 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -5,6 +5,8 @@ #include #include "event.h" #include "../ui/helpline.h" +#include "../ui/progress.h" +#include "../ui/util.h" extern int verbose; extern bool quiet, dump_trace; @@ -12,38 +14,7 @@ extern bool quiet, dump_trace; int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(union perf_event *event); -struct ui_progress; -struct perf_error_ops; - -#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT) - -#include "../ui/progress.h" int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); -#include "../ui/util.h" - -#else - -static inline void ui_progress__update(u64 curr __maybe_unused, - u64 total __maybe_unused, - const char *title __maybe_unused) {} -static inline void ui_progress__finish(void) {} - -#define ui__error(format, arg...) ui__warning(format, ##arg) - -static inline int -perf_error__register(struct perf_error_ops *eops __maybe_unused) -{ - return 0; -} - -static inline int -perf_error__unregister(struct perf_error_ops *eops __maybe_unused) -{ - return 0; -} - -#endif /* NEWT_SUPPORT || GTK2_SUPPORT */ - int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); int ui__error_paranoid(void); -- cgit v1.2.3-59-g8ed1b From b56e53312d445967f6bdb91e5667c56755e47450 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 15 Nov 2012 01:47:41 +0900 Subject: perf ui/helpline: Introduce ui_helpline__vshow() The ui_helpline__vshow() will be used for pr_* functions. 
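The plumbing is a plain ops-style callback: each UI backend (TUI, GTK) fills in a ->show method and generic code dispatches through the current helpline_fns pointer. A minimal standalone sketch of that dispatch, assuming a simplified struct and an illustrative stderr backend (the patch's real default is a nop that returns 0):

  #include <stdarg.h>
  #include <stdio.h>

  struct ui_helpline {
  	int (*show)(const char *fmt, va_list ap);
  };

  /* illustrative backend; the patch's default ->show is a nop */
  static int stderr_helpline__show(const char *fmt, va_list ap)
  {
  	return vfprintf(stderr, fmt, ap);
  }

  static struct ui_helpline stderr_helpline_fns = {
  	.show = stderr_helpline__show,
  };

  static struct ui_helpline *helpline_fns = &stderr_helpline_fns;

  /* generic entry point: callers never know which backend is active */
  static int ui_helpline__vshow(const char *fmt, va_list ap)
  {
  	return helpline_fns->show(fmt, ap);
  }

  /* a pr_*-style variadic wrapper just forwards its va_list */
  static int __attribute__((format(printf, 1, 2)))
  test_vshow(const char *fmt, ...)
  {
  	va_list ap;
  	int ret;

  	va_start(ap, fmt);
  	ret = ui_helpline__vshow(fmt, ap);
  	va_end(ap);
  	return ret;
  }

  int main(void)
  {
  	return test_vshow("hello %s\n", "helpline") < 0;
  }
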
Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1352911664-24620-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/helpline.c | 23 ++++++++++++----------- tools/perf/ui/helpline.c | 12 ++++++++++++ tools/perf/ui/helpline.h | 22 ++-------------------- tools/perf/ui/tui/helpline.c | 29 +++++++++++++++-------------- tools/perf/util/debug.c | 6 ++---- 5 files changed, 43 insertions(+), 49 deletions(-) diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c index 5db4432ff12a..3388cbd12186 100644 --- a/tools/perf/ui/gtk/helpline.c +++ b/tools/perf/ui/gtk/helpline.c @@ -24,17 +24,7 @@ static void gtk_helpline_push(const char *msg) pgctx->statbar_ctx_id, msg); } -static struct ui_helpline gtk_helpline_fns = { - .pop = gtk_helpline_pop, - .push = gtk_helpline_push, -}; - -void perf_gtk__init_helpline(void) -{ - helpline_fns = >k_helpline_fns; -} - -int perf_gtk__show_helpline(const char *fmt, va_list ap) +static int gtk_helpline_show(const char *fmt, va_list ap) { int ret; char *ptr; @@ -54,3 +44,14 @@ int perf_gtk__show_helpline(const char *fmt, va_list ap) return ret; } + +static struct ui_helpline gtk_helpline_fns = { + .pop = gtk_helpline_pop, + .push = gtk_helpline_push, + .show = gtk_helpline_show, +}; + +void perf_gtk__init_helpline(void) +{ + helpline_fns = >k_helpline_fns; +} diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c index a49bcf3c190b..700fb3cfa1c7 100644 --- a/tools/perf/ui/helpline.c +++ b/tools/perf/ui/helpline.c @@ -16,9 +16,16 @@ static void nop_helpline__push(const char *msg __maybe_unused) { } +static int nop_helpline__show(const char *fmt __maybe_unused, + va_list ap __maybe_unused) +{ + return 0; +} + static struct ui_helpline default_helpline_fns = { .pop = nop_helpline__pop, .push = nop_helpline__push, + .show = nop_helpline__show, }; struct ui_helpline *helpline_fns = &default_helpline_fns; @@ -59,3 +66,8 @@ void ui_helpline__puts(const char *msg) ui_helpline__pop(); ui_helpline__push(msg); } + +int ui_helpline__vshow(const char *fmt, va_list ap) +{ + return helpline_fns->show(fmt, ap); +} diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h index baa28a4d16b9..46181f4fc07e 100644 --- a/tools/perf/ui/helpline.h +++ b/tools/perf/ui/helpline.h @@ -9,6 +9,7 @@ struct ui_helpline { void (*pop)(void); void (*push)(const char *msg); + int (*show)(const char *fmt, va_list ap); }; extern struct ui_helpline *helpline_fns; @@ -20,28 +21,9 @@ void ui_helpline__push(const char *msg); void ui_helpline__vpush(const char *fmt, va_list ap); void ui_helpline__fpush(const char *fmt, ...); void ui_helpline__puts(const char *msg); +int ui_helpline__vshow(const char *fmt, va_list ap); extern char ui_helpline__current[512]; - -#ifdef NEWT_SUPPORT extern char ui_helpline__last_msg[]; -int ui_helpline__show_help(const char *format, va_list ap); -#else -static inline int ui_helpline__show_help(const char *format __maybe_unused, - va_list ap __maybe_unused) -{ - return 0; -} -#endif /* NEWT_SUPPORT */ - -#ifdef GTK2_SUPPORT -int perf_gtk__show_helpline(const char *format, va_list ap); -#else -static inline int perf_gtk__show_helpline(const char *format __maybe_unused, - va_list ap __maybe_unused) -{ - return 0; -} -#endif /* GTK2_SUPPORT */ #endif /* _PERF_UI_HELPLINE_H_ */ diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c index 2884d2f41e33..1c8b9afd5d6e 
100644 --- a/tools/perf/ui/tui/helpline.c +++ b/tools/perf/ui/tui/helpline.c @@ -8,6 +8,8 @@ #include "../ui.h" #include "../libslang.h" +char ui_helpline__last_msg[1024]; + static void tui_helpline__pop(void) { } @@ -23,20 +25,7 @@ static void tui_helpline__push(const char *msg) strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; } -struct ui_helpline tui_helpline_fns = { - .pop = tui_helpline__pop, - .push = tui_helpline__push, -}; - -void ui_helpline__init(void) -{ - helpline_fns = &tui_helpline_fns; - ui_helpline__puts(" "); -} - -char ui_helpline__last_msg[1024]; - -int ui_helpline__show_help(const char *format, va_list ap) +static int tui_helpline__show(const char *format, va_list ap) { int ret; static int backlog; @@ -55,3 +44,15 @@ int ui_helpline__show_help(const char *format, va_list ap) return ret; } + +struct ui_helpline tui_helpline_fns = { + .pop = tui_helpline__pop, + .push = tui_helpline__push, + .show = tui_helpline__show, +}; + +void ui_helpline__init(void) +{ + helpline_fns = &tui_helpline_fns; + ui_helpline__puts(" "); +} diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 39861a2a7d18..391c9a9e946f 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -23,10 +23,8 @@ int eprintf(int level, const char *fmt, ...) if (verbose >= level) { va_start(args, fmt); - if (use_browser == 1) - ret = ui_helpline__show_help(fmt, args); - else if (use_browser == 2) - ret = perf_gtk__show_helpline(fmt, args); + if (use_browser > 1) + ui_helpline__vshow(fmt, args); else ret = vfprintf(stderr, fmt, args); va_end(args); -- cgit v1.2.3-59-g8ed1b From 3cde41b0d63d0550ac9c8352f1ce0ea516690f46 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 14 Nov 2012 13:39:50 +0900 Subject: perf tools: Don't check configuration on make clean Current perf build process checks various system configuration on invocation to make. But this is not needed just for cleaning. To do that, move some of python related variables out of conditional since 'clean' target needs them. Normal path should not be affected by this. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1352867990-658-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/Makefile | 2 ++ tools/perf/Makefile | 28 ++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index ef6d22e879eb..f6e5901c8f5f 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -222,10 +222,12 @@ install-pdf: pdf #install-html: html # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) +ifneq ($(MAKECMDGOALS),clean) $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE -include $(OUTPUT)PERF-VERSION-FILE +endif # # Determine "include::" file references in asciidoc files. diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 8fca560cf78e..08da9fc616c4 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -153,6 +153,7 @@ INSTALL = install # explicitly what architecture to check for. Fix this up for yours.. 
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ +ifneq ($(MAKECMDGOALS),clean) -include config/feature-tests.mak ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) @@ -206,6 +207,7 @@ ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y) EXTLIBS := $(filter-out -lpthread,$(EXTLIBS)) BASIC_CFLAGS += -I. endif +endif # MAKECMDGOALS != clean # Guard against environment variables BUILTIN_OBJS = @@ -230,11 +232,19 @@ endif LIBTRACEEVENT = $(TE_PATH)libtraceevent.a TE_LIB := -L$(TE_PATH) -ltraceevent +export LIBTRACEEVENT + +# python extension build directories +PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ +PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ +PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ +export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP + +python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so + PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py -export LIBTRACEEVENT - $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ --quiet build_ext; \ @@ -514,6 +524,7 @@ PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT) # # Platform specific tweaks # +ifneq ($(MAKECMDGOALS),clean) # We choose to avoid "if .. else if .. else .. endif endif" # because maintaining the nesting to match is a pain. If @@ -703,7 +714,7 @@ disable-python = $(eval $(disable-python_code)) define disable-python_code BASIC_CFLAGS += -DNO_LIBPYTHON $(if $(1),$(warning No $(1) was found)) - $(warning Python support won't be built) + $(warning Python support will not be built) endef override PYTHON := \ @@ -711,19 +722,10 @@ override PYTHON := \ ifndef PYTHON $(call disable-python,python interpreter) - python-clean := else PYTHON_WORD := $(call shell-wordify,$(PYTHON)) - # python extension build directories - PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ - PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ - PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ - export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP - - python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so - ifdef NO_LIBPYTHON $(call disable-python) else @@ -839,6 +841,8 @@ ifdef ASCIIDOC8 export ASCIIDOC8 endif +endif # MAKECMDGOALS != clean + # Shell quote (do not use $(call) to accommodate ancient setups); ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) -- cgit v1.2.3-59-g8ed1b From 03cd20949964f5cda600a56e12ffac39dfec4cb0 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 21 Nov 2012 13:43:19 +0900 Subject: perf session: Free environment information when deleting session The perf session environment information was saved (so allocated) during perf_session__open, but was not freed. As free(3) handles NULL pointer input properly it won't cause a issue for writing modes - e.g. 
perf record Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Feng Tang Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1353472999-23042-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/session.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index ce6f51162386..d5fb60760bac 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -204,11 +204,28 @@ static void perf_session__delete_threads(struct perf_session *session) machine__delete_threads(&session->host_machine); } +static void perf_session_env__delete(struct perf_session_env *env) +{ + free(env->hostname); + free(env->os_release); + free(env->version); + free(env->arch); + free(env->cpu_desc); + free(env->cpuid); + + free(env->cmdline); + free(env->sibling_cores); + free(env->sibling_threads); + free(env->numa_nodes); + free(env->pmu_mappings); +} + void perf_session__delete(struct perf_session *self) { perf_session__destroy_kernel_maps(self); perf_session__delete_dead_threads(self); perf_session__delete_threads(self); + perf_session_env__delete(&self->header.env); machine__exit(&self->host_machine); close(self->fd); free(self); -- cgit v1.2.3-59-g8ed1b From ee8d7787e197a0b1829c3f17dc1287224f14f35e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 21 Nov 2012 18:58:50 -0300 Subject: perf top: Add missing newline on pr_err call The perf_event__process_sample function, when not finding a machine associated with a sample, was calling pr_err without a newline, garbling the screen on TUI mode due to a problem introduced by a recent ui_helpline patch. On --stdio it would just concatenate the messages for each sample with no machine associated, fix it by adding the newline. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-vuz88welqvp15c2uybd9osnz@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c9ff3950cd4b..987e1b8a9c2e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -727,7 +727,7 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (!machine) { - pr_err("%u unprocessable samples recorded.", + pr_err("%u unprocessable samples recorded.\n", top->session->hists.stats.nr_unprocessable_samples++); return; } -- cgit v1.2.3-59-g8ed1b From 35d48ddfc0627443bd7ad2750a3f65d42cb742a0 Mon Sep 17 00:00:00 2001 From: David Miller Date: Sat, 10 Nov 2012 14:12:19 -0500 Subject: perf tools: Fix mmap limitations on 32-bit This is a suggested patch to fix the bug I reported at: http://marc.info/?l=linux-kernel&m=135033028924652&w=2 Essentially, there is a hard requirement that when perf analyzes a trace, it must have the entire thing mmap()'d. Therefore the scheme used on 32-bit where we have a fixed (8) number of 32MB mmaps, and cycle through them, simply does not work. One of the reasons this requirement exists is because the iterators maintain references to perf entry objects and those references don't just simply go away when this mmap code decides to cycle an old mmap area out and reuse it. At this point, those entry pointers now point to garbage resulting in unpredictable behavior and crashes. 
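A hypothetical, self-contained toy (not perf code) showing the class of bug - a held pointer silently referring to whatever the recycled window now contains:

  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
  	/* one anonymous page standing in for a 32MB mmap window */
  	char *win = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
  			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	if (win == MAP_FAILED)
  		return 1;

  	strcpy(win, "event A");
  	const char *held = win;		/* iterator keeps a reference ...   */

  	strcpy(win, "event B");		/* ... window recycled for new data */
  	printf("%s\n", held);		/* prints "event B": stale pointer  */

  	return munmap(win, 4096);
  }
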
It is better to try to mmap() as much as we can and if we do actually run into address space limitations, the failure of the mmap() call will indicate that and stop processing. I noticed that perf_session->mmap_window is set to a constant in one location, and only used in one other location. So I got rid of it altogether. So we adjust the size of the mmaps[] array to the maximum we could need. On 64-bit we only need one slot. On 32-bit we could need up to 128 (128 * 32MB == 4GB). I've verified that this allows a large (~600MB) perf.data file to be analyzed properly with a 32-bit perf binary, which previously was not possible. Signed-off-by: David S. Miller Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20121110.141219.582924082787523608.davem@davemloft.net Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/session.c | 25 ++++++++++++++----------- tools/perf/util/session.h | 1 - 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index d5fb60760bac..aa5e58255cba 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -128,15 +128,6 @@ struct perf_session *perf_session__new(const char *filename, int mode, goto out; memcpy(self->filename, filename, len); - /* - * On 64bit we can mmap the data file in one go. No need for tiny mmap - * slices. On 32bit we use 32MB. - */ -#if BITS_PER_LONG == 64 - self->mmap_window = ULLONG_MAX; -#else - self->mmap_window = 32 * 1024 * 1024ULL; -#endif self->machines = RB_ROOT; self->repipe = repipe; INIT_LIST_HEAD(&self->ordered_samples.samples); @@ -1386,6 +1377,18 @@ fetch_mmaped_event(struct perf_session *session, return event; } +/* + * On 64bit we can mmap the data file in one go. No need for tiny mmap + * slices. On 32bit we use 32MB. + */ +#if BITS_PER_LONG == 64 +#define MMAP_SIZE ULLONG_MAX +#define NUM_MMAPS 1 +#else +#define MMAP_SIZE (32 * 1024 * 1024ULL) +#define NUM_MMAPS 128 +#endif + int __perf_session__process_events(struct perf_session *session, u64 data_offset, u64 data_size, u64 file_size, struct perf_tool *tool) @@ -1393,7 +1396,7 @@ int __perf_session__process_events(struct perf_session *session, u64 head, page_offset, file_offset, file_pos, progress_next; int err, mmap_prot, mmap_flags, map_idx = 0; size_t mmap_size; - char *buf, *mmaps[8]; + char *buf, *mmaps[NUM_MMAPS]; union perf_event *event; uint32_t size; @@ -1408,7 +1411,7 @@ int __perf_session__process_events(struct perf_session *session, progress_next = file_size / 16; - mmap_size = session->mmap_window; + mmap_size = MMAP_SIZE; if (mmap_size > file_size) mmap_size = file_size; diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index cea133a6bdf1..c18fabdff19b 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -30,7 +30,6 @@ struct ordered_samples { struct perf_session { struct perf_header header; unsigned long size; - unsigned long mmap_window; struct machine host_machine; struct rb_root machines; struct perf_evlist *evlist; -- cgit v1.2.3-59-g8ed1b From 1240005e0d3a7e03c2fd05603fb01676e5a004f7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 13 Oct 2012 00:06:16 +0200 Subject: perf hists: Introduce perf_hpp__list for period related columns Adding perf_hpp__list list to register and contain all period related columns the command is interested in. This way we get rid of static array holding all possible columns and enable commands to register their own columns. 
It'll be handy for diff command in future to process and display data for multiple files. Signed-off-by: Jiri Olsa Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Paul Mackerras Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Namhyung Kim Link: http://lkml.kernel.org/n/tip-kiykge4igrcl7etmpmveto1h@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-diff.c | 21 ++++----- tools/perf/builtin-report.c | 1 + tools/perf/ui/browsers/hists.c | 20 +++++---- tools/perf/ui/gtk/browser.c | 30 +++++-------- tools/perf/ui/hist.c | 96 +++++++++++++++++++++++------------------- tools/perf/ui/setup.c | 1 + tools/perf/ui/stdio/hist.c | 17 +++----- tools/perf/util/hist.h | 11 ++++- 8 files changed, 101 insertions(+), 96 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 93b852f8a5d5..9fbbc01c5ad7 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -597,40 +597,35 @@ static const struct option options[] = { static void ui_init(void) { - perf_hpp__init(); - - /* No overhead column. */ - perf_hpp__column_enable(PERF_HPP__OVERHEAD, false); - /* * Display baseline/delta/ratio/displacement/ * formula/periods columns. */ - perf_hpp__column_enable(PERF_HPP__BASELINE, true); + perf_hpp__column_enable(PERF_HPP__BASELINE); switch (compute) { case COMPUTE_DELTA: - perf_hpp__column_enable(PERF_HPP__DELTA, true); + perf_hpp__column_enable(PERF_HPP__DELTA); break; case COMPUTE_RATIO: - perf_hpp__column_enable(PERF_HPP__RATIO, true); + perf_hpp__column_enable(PERF_HPP__RATIO); break; case COMPUTE_WEIGHTED_DIFF: - perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF, true); + perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF); break; default: BUG_ON(1); }; if (show_displacement) - perf_hpp__column_enable(PERF_HPP__DISPL, true); + perf_hpp__column_enable(PERF_HPP__DISPL); if (show_formula) - perf_hpp__column_enable(PERF_HPP__FORMULA, true); + perf_hpp__column_enable(PERF_HPP__FORMULA); if (show_period) { - perf_hpp__column_enable(PERF_HPP__PERIOD, true); - perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE, true); + perf_hpp__column_enable(PERF_HPP__PERIOD); + perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE); } } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index fc251005dd3d..5134acf1c39a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -692,6 +692,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) setup_browser(true); else { use_browser = 0; + perf_hpp__column_enable(PERF_HPP__OVERHEAD); perf_hpp__init(); } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index ccc4bd161420..57b82c26cd05 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -587,6 +587,8 @@ HPP__COLOR_FN(overhead_guest_us, period_guest_us) void hist_browser__init_hpp(void) { + perf_hpp__column_enable(PERF_HPP__OVERHEAD); + perf_hpp__init(); perf_hpp__format[PERF_HPP__OVERHEAD].color = @@ -607,12 +609,13 @@ static int hist_browser__show_entry(struct hist_browser *browser, { char s[256]; double percent; - int i, printed = 0; + int printed = 0; int width = browser->b.width; char folded_sign = ' '; bool current_entry = ui_browser__is_current_entry(&browser->b, row); off_t row_offset = entry->row_offset; bool first = true; + struct perf_hpp_fmt *fmt; if (current_entry) { browser->he_selection = entry; @@ -629,12 +632,11 @@ static int hist_browser__show_entry(struct hist_browser *browser, .buf = s, .size = sizeof(s), }; + int i = 0; 
ui_browser__gotorc(&browser->b, row, 0); - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; + perf_hpp__for_each_format(fmt) { if (!first) { slsmg_printf(" "); @@ -642,14 +644,14 @@ static int hist_browser__show_entry(struct hist_browser *browser, } first = false; - if (perf_hpp__format[i].color) { + if (fmt->color) { hpp.ptr = &percent; /* It will set percent for us. See HPP__COLOR_FN above. */ - width -= perf_hpp__format[i].color(&hpp, entry); + width -= fmt->color(&hpp, entry); ui_browser__set_percent_color(&browser->b, percent, current_entry); - if (i == PERF_HPP__OVERHEAD && symbol_conf.use_callchain) { + if (!i && symbol_conf.use_callchain) { slsmg_printf("%c ", folded_sign); width -= 2; } @@ -659,9 +661,11 @@ static int hist_browser__show_entry(struct hist_browser *browser, if (!current_entry || !browser->b.navkeypressed) ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); } else { - width -= perf_hpp__format[i].entry(&hpp, entry); + width -= fmt->entry(&hpp, entry); slsmg_printf("%s", s); } + + i++; } /* The scroll bar isn't being used */ diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c index 253b6219a39e..e59ba337f494 100644 --- a/tools/perf/ui/gtk/browser.c +++ b/tools/perf/ui/gtk/browser.c @@ -74,6 +74,8 @@ HPP__COLOR_FN(overhead_guest_us, period_guest_us) void perf_gtk__init_hpp(void) { + perf_hpp__column_enable(PERF_HPP__OVERHEAD); + perf_hpp__init(); perf_hpp__format[PERF_HPP__OVERHEAD].color = @@ -90,13 +92,14 @@ void perf_gtk__init_hpp(void) static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) { + struct perf_hpp_fmt *fmt; GType col_types[MAX_COLUMNS]; GtkCellRenderer *renderer; struct sort_entry *se; GtkListStore *store; struct rb_node *nd; GtkWidget *view; - int i, col_idx; + int col_idx; int nr_cols; char s[512]; @@ -107,12 +110,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) nr_cols = 0; - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - + perf_hpp__for_each_format(fmt) col_types[nr_cols++] = G_TYPE_STRING; - } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) @@ -129,12 +128,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) col_idx = 0; - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - - perf_hpp__format[i].header(&hpp); - + perf_hpp__for_each_format(fmt) { + fmt->header(&hpp); gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), -1, s, renderer, "markup", @@ -166,14 +161,11 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) col_idx = 0; - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - - if (perf_hpp__format[i].color) - perf_hpp__format[i].color(&hpp, h); + perf_hpp__for_each_format(fmt) { + if (fmt->color) + fmt->color(&hpp, h); else - perf_hpp__format[i].entry(&hpp, h); + fmt->entry(&hpp, h); gtk_list_store_set(store, &iter, col_idx++, s, -1); } diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index aa84130024d5..0a5281fe41d6 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -386,60 +386,71 @@ static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he) return scnprintf(hpp->buf, hpp->size, fmt, buf); } -#define HPP__COLOR_PRINT_FNS(_name) \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .color = hpp__color_ ## _name, \ - .entry = hpp__entry_ ## _name +#define 
HPP__COLOR_PRINT_FNS(_name) \ + { \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .color = hpp__color_ ## _name, \ + .entry = hpp__entry_ ## _name \ + } -#define HPP__PRINT_FNS(_name) \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .entry = hpp__entry_ ## _name +#define HPP__PRINT_FNS(_name) \ + { \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .entry = hpp__entry_ ## _name \ + } struct perf_hpp_fmt perf_hpp__format[] = { - { .cond = false, HPP__COLOR_PRINT_FNS(baseline) }, - { .cond = true, HPP__COLOR_PRINT_FNS(overhead) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) }, - { .cond = false, HPP__PRINT_FNS(samples) }, - { .cond = false, HPP__PRINT_FNS(period) }, - { .cond = false, HPP__PRINT_FNS(period_baseline) }, - { .cond = false, HPP__PRINT_FNS(delta) }, - { .cond = false, HPP__PRINT_FNS(ratio) }, - { .cond = false, HPP__PRINT_FNS(wdiff) }, - { .cond = false, HPP__PRINT_FNS(displ) }, - { .cond = false, HPP__PRINT_FNS(formula) } + HPP__COLOR_PRINT_FNS(baseline), + HPP__COLOR_PRINT_FNS(overhead), + HPP__COLOR_PRINT_FNS(overhead_sys), + HPP__COLOR_PRINT_FNS(overhead_us), + HPP__COLOR_PRINT_FNS(overhead_guest_sys), + HPP__COLOR_PRINT_FNS(overhead_guest_us), + HPP__PRINT_FNS(samples), + HPP__PRINT_FNS(period), + HPP__PRINT_FNS(period_baseline), + HPP__PRINT_FNS(delta), + HPP__PRINT_FNS(ratio), + HPP__PRINT_FNS(wdiff), + HPP__PRINT_FNS(displ), + HPP__PRINT_FNS(formula) }; +LIST_HEAD(perf_hpp__list); + #undef HPP__COLOR_PRINT_FNS #undef HPP__PRINT_FNS void perf_hpp__init(void) { if (symbol_conf.show_cpu_utilization) { - perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true; - perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true; + perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS); + perf_hpp__column_enable(PERF_HPP__OVERHEAD_US); if (perf_guest) { - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true; - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true; + perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS); + perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US); } } if (symbol_conf.show_nr_samples) - perf_hpp__format[PERF_HPP__SAMPLES].cond = true; + perf_hpp__column_enable(PERF_HPP__SAMPLES); if (symbol_conf.show_total_period) - perf_hpp__format[PERF_HPP__PERIOD].cond = true; + perf_hpp__column_enable(PERF_HPP__PERIOD); } -void perf_hpp__column_enable(unsigned col, bool enable) +void perf_hpp__column_register(struct perf_hpp_fmt *format) +{ + list_add_tail(&format->list, &perf_hpp__list); +} + +void perf_hpp__column_enable(unsigned col) { BUG_ON(col >= PERF_HPP__MAX_INDEX); - perf_hpp__format[col].cond = enable; + perf_hpp__column_register(&perf_hpp__format[col]); } static inline void advance_hpp(struct perf_hpp *hpp, int inc) @@ -452,27 +463,25 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, bool color) { const char *sep = symbol_conf.field_sep; + struct perf_hpp_fmt *fmt; char *start = hpp->buf; - int i, ret; + int ret; bool first = true; if (symbol_conf.exclude_other && !he->parent) return 0; - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - + perf_hpp__for_each_format(fmt) { if (!sep || !first) { ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); advance_hpp(hpp, ret); first = false; } - if (color && perf_hpp__format[i].color) - 
ret = perf_hpp__format[i].color(hpp, he); + if (color && fmt->color) + ret = fmt->color(hpp, he); else - ret = perf_hpp__format[i].entry(hpp, he); + ret = fmt->entry(hpp, he); advance_hpp(hpp, ret); } @@ -504,16 +513,15 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size, */ unsigned int hists__sort_list_width(struct hists *hists) { + struct perf_hpp_fmt *fmt; struct sort_entry *se; - int i, ret = 0; + int i = 0, ret = 0; - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; + perf_hpp__for_each_format(fmt) { if (i) ret += 2; - ret += perf_hpp__format[i].width(NULL); + ret += fmt->width(NULL); } list_for_each_entry(se, &hist_entry__sort_list, list) diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c index ebb4cc107876..166f13df3134 100644 --- a/tools/perf/ui/setup.c +++ b/tools/perf/ui/setup.c @@ -30,6 +30,7 @@ void setup_browser(bool fallback_to_pager) if (fallback_to_pager) setup_pager(); + perf_hpp__column_enable(PERF_HPP__OVERHEAD); perf_hpp__init(); break; } diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index f0ee204f99bb..0eae3b2c32f2 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -335,13 +335,14 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size, size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, int max_cols, FILE *fp) { + struct perf_hpp_fmt *fmt; struct sort_entry *se; struct rb_node *nd; size_t ret = 0; unsigned int width; const char *sep = symbol_conf.field_sep; const char *col_width = symbol_conf.col_width_list_str; - int idx, nr_rows = 0; + int nr_rows = 0; char bf[96]; struct perf_hpp dummy_hpp = { .buf = bf, @@ -355,16 +356,14 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, goto print_entries; fprintf(fp, "# "); - for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { - if (!perf_hpp__format[idx].cond) - continue; + perf_hpp__for_each_format(fmt) { if (!first) fprintf(fp, "%s", sep ?: " "); else first = false; - perf_hpp__format[idx].header(&dummy_hpp); + fmt->header(&dummy_hpp); fprintf(fp, "%s", bf); } @@ -400,18 +399,16 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, first = true; fprintf(fp, "# "); - for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { - unsigned int i; - if (!perf_hpp__format[idx].cond) - continue; + perf_hpp__for_each_format(fmt) { + unsigned int i; if (!first) fprintf(fp, "%s", sep ?: " "); else first = false; - width = perf_hpp__format[idx].width(&dummy_hpp); + width = fmt->width(&dummy_hpp); for (i = 0; i < width; i++) fprintf(fp, "."); } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 8b091a51e4a2..a935a60d521c 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -126,13 +126,19 @@ struct perf_hpp { }; struct perf_hpp_fmt { - bool cond; int (*header)(struct perf_hpp *hpp); int (*width)(struct perf_hpp *hpp); int (*color)(struct perf_hpp *hpp, struct hist_entry *he); int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); + + struct list_head list; }; +extern struct list_head perf_hpp__list; + +#define perf_hpp__for_each_format(format) \ + list_for_each_entry(format, &perf_hpp__list, list) + extern struct perf_hpp_fmt perf_hpp__format[]; enum { @@ -155,7 +161,8 @@ enum { }; void perf_hpp__init(void); -void perf_hpp__column_enable(unsigned col, bool enable); +void perf_hpp__column_register(struct perf_hpp_fmt *format); +void perf_hpp__column_enable(unsigned col); int hist_entry__period_snprintf(struct 
perf_hpp *hpp, struct hist_entry *he, bool color); -- cgit v1.2.3-59-g8ed1b From c0d246b85fc7d42688d7a5d999ea671777caf65b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 20 Oct 2012 22:14:10 +0200 Subject: perf hists: Fix period symbol_conf.field_sep display Currently we don't properly display hist data with symbol_conf.field_sep separator. We need to display either space or separator. Signed-off-by: Jiri Olsa Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Paul Mackerras Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Namhyung Kim Link: http://lkml.kernel.org/n/tip-cyggwys0bz5kqdowwvfd8h72@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/hist.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 0a5281fe41d6..6e639b506829 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -472,11 +472,15 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, return 0; perf_hpp__for_each_format(fmt) { + /* + * If there's no field_sep, we still need + * to display initial ' '. + */ if (!sep || !first) { ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); advance_hpp(hpp, ret); + } else first = false; - } if (color && fmt->color) ret = fmt->color(hpp, he); -- cgit v1.2.3-59-g8ed1b From 3843b05d6ef20acbd4b0043e4ab7536602eb10ae Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 21 Nov 2012 13:49:44 +0100 Subject: perf symbols: Ignore ABS symbols when loading data maps When loading symbols in a data mapping, ABS symbols (which has a value of SHN_ABS in its st_shndx) failed at elf_getscn(). And it marks the loading as a failure so already loaded symbols cannot be fixed up. I'm not sure what should be done. Just ignore them for now. :) Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1353502185-26521-19-git-send-email-eranian@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index db0cc92cf2ea..f63557b59c06 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -718,6 +718,17 @@ int dso__load_sym(struct dso *dso, struct map *map, sym.st_value); used_opd = true; } + /* + * When loading symbols in a data mapping, ABS symbols (which + * has a value of SHN_ABS in its st_shndx) failed at + * elf_getscn(). And it marks the loading as a failure so + * already loaded symbols cannot be fixed up. + * + * I'm not sure what should be done. Just ignore them for now. + * - Namhyung Kim + */ + if (sym.st_shndx == SHN_ABS) + continue; sec = elf_getscn(runtime_ss->elf, sym.st_shndx); if (!sec) -- cgit v1.2.3-59-g8ed1b From 2850d9487266a55a18d340c24f5107696deeebb9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 29 Nov 2012 14:38:40 -0300 Subject: perf hists: Fix typo on hist__entry_add_pair Fix a misplaced underscore. In this case, 'hist_entry' is the name of data structure and we usually put double underscores between data structure and actual function name. 
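For reference, the convention at work here (declaration only; the struct and the real inline helper live in tools/perf/util/sort.h):

  struct hist_entry;

  /* <struct name>__<operation>: this reads as a method of hist_entry ... */
  void hist_entry__add_pair(struct hist_entry *he, struct hist_entry *pair);

  /* ... whereas hist__entry_add_pair would wrongly suggest an
   * 'entry_add_pair' operation on a 'hist' */
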
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa , Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-8jdq8g6kl6v54hkexrfwsy72@git.kernel.org [ committer note: put it in front of the patch queue where it came from ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 4 ++-- tools/perf/util/sort.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index cb17e2a8c6ed..d2bc05cd1708 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -785,7 +785,7 @@ void hists__match(struct hists *leader, struct hists *other) pair = hists__find_entry(other, pos); if (pair) - hist__entry_add_pair(pos, pair); + hist_entry__add_pair(pos, pair); } } @@ -806,7 +806,7 @@ int hists__link(struct hists *leader, struct hists *other) pair = hists__add_dummy_entry(leader, pos); if (pair == NULL) return -1; - hist__entry_add_pair(pair, pos); + hist_entry__add_pair(pair, pos); } } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index b4e8c3ba559d..91ae2744e7d5 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -118,7 +118,7 @@ static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he) return NULL; } -static inline void hist__entry_add_pair(struct hist_entry *he, +static inline void hist_entry__add_pair(struct hist_entry *he, struct hist_entry *pair) { list_add_tail(&he->pairs.head, &pair->pairs.node); -- cgit v1.2.3-59-g8ed1b From 5fa9041bbaa7a79a67d568b9c9f947db2f23d091 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 29 Nov 2012 15:38:34 +0900 Subject: perf hists: Link hist entry pairs to leader Current hists__match/link() link a leader to its pair, so if multiple pairs were linked, the leader will lose pointer to previous pairs since it was overwritten. Fix it by making leader the list head. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1354171126-14387-8-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index d2bc05cd1708..82df1b26f0d4 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -785,7 +785,7 @@ void hists__match(struct hists *leader, struct hists *other) pair = hists__find_entry(other, pos); if (pair) - hist_entry__add_pair(pos, pair); + hist_entry__add_pair(pair, pos); } } @@ -806,7 +806,7 @@ int hists__link(struct hists *leader, struct hists *other) pair = hists__add_dummy_entry(leader, pos); if (pair == NULL) return -1; - hist_entry__add_pair(pair, pos); + hist_entry__add_pair(pos, pair); } } -- cgit v1.2.3-59-g8ed1b From 2cfda562da7b0b1e0575507ef3efe782d4e218e4 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 29 Nov 2012 15:38:29 +0900 Subject: perf evsel: Set leader evsel's ->leader to itself Currently only non-leader members are set ->leader to the leader evsel of the group and the leader has set NULL. Thus it requires special casing for leader evsels. Set ->leader to itself will remove this. 
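A standalone sketch of the resulting invariant (struct reduced to the one field that matters; the real initialization happens in perf_evsel__init):

  #include <stdbool.h>
  #include <stdio.h>

  struct perf_evsel {
  	struct perf_evsel *leader;	/* never NULL: a leader points to itself */
  };

  static void evsel_init(struct perf_evsel *evsel)
  {
  	evsel->leader = evsel;		/* default: every evsel leads itself */
  }

  static bool is_group_member(const struct perf_evsel *evsel)
  {
  	return evsel->leader != evsel;	/* no NULL special case needed */
  }

  int main(void)
  {
  	struct perf_evsel leader, member;

  	evsel_init(&leader);
  	evsel_init(&member);
  	member.leader = &leader;	/* joining a group overrides the default */

  	/* prints "0 1": the leader is not a "member", the member is */
  	printf("%d %d\n", is_group_member(&leader), is_group_member(&member));
  	return 0;
  }
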
Suggested-by: Arnaldo Carvalho de Melo Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1354171126-14387-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 1 - tools/perf/util/evsel.c | 1 + tools/perf/util/evsel.h | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 705293489e3c..90db2a14e4bb 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -111,7 +111,6 @@ void __perf_evlist__set_leader(struct list_head *list) struct perf_evsel *evsel, *leader; leader = list_entry(list->next, struct perf_evsel, node); - leader->leader = NULL; list_for_each_entry(evsel, list, node) { if (evsel != leader) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1b16dd1edc8e..7e93418a45d1 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -55,6 +55,7 @@ void perf_evsel__init(struct perf_evsel *evsel, { evsel->idx = idx; evsel->attr = *attr; + evsel->leader = evsel; INIT_LIST_HEAD(&evsel->node); hists__init(&evsel->hists); evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 3d2b8017438c..cbf6d97f0646 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -228,6 +228,6 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel) { - return evsel->leader != NULL; + return evsel->leader != evsel; } #endif /* __PERF_EVSEL_H */ -- cgit v1.2.3-59-g8ed1b From 823254edc66eb44bf612b1dfa4829afa9840f691 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 29 Nov 2012 15:38:30 +0900 Subject: perf evsel: Convert to _is_group_leader method Convert perf_evsel__is_group_member to perf_evsel__is_group_leader. This is because the most usecases are using negative form to check whether the given evsel is a leader or not and it's IMHO somewhat ambiguous - leader also *is* a member of the group. 
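Since ->leader is never NULL after the previous patch, the two predicates are exact negations and the conversion is mechanical; e.g. the builtin-stat.c hunk in the diff below:

  /* before: negative form obscures the intent */
  if (!perf_target__has_task(&target) && !perf_evsel__is_group_member(evsel))
  	attr->disabled = 1;

  /* after: reads directly as "this evsel is the group leader" */
  if (!perf_target__has_task(&target) && perf_evsel__is_group_leader(evsel))
  	attr->disabled = 1;
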
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1354171126-14387-4-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 2 +- tools/perf/tests/parse-events.c | 20 ++++++++++---------- tools/perf/util/evlist.c | 4 ++-- tools/perf/util/evsel.c | 6 +++--- tools/perf/util/evsel.h | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c247faca7127..c12655af2b88 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -153,7 +153,7 @@ retry: } if (!perf_target__has_task(&target) && - !perf_evsel__is_group_member(evsel)) { + perf_evsel__is_group_leader(evsel)) { attr->disabled = 1; attr->enable_on_exec = 1; } diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 32ee478905eb..294ffddfbf42 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -521,7 +521,7 @@ static int test__group1(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); /* cycles:upp */ evsel = perf_evsel__next(evsel); @@ -557,7 +557,7 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); /* cache-references + :u modifier */ evsel = perf_evsel__next(evsel); @@ -583,7 +583,7 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); return 0; } @@ -606,7 +606,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); TEST_ASSERT_VAL("wrong group name", !strcmp(leader->group_name, "group1")); @@ -636,7 +636,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); TEST_ASSERT_VAL("wrong group name", !strcmp(leader->group_name, "group2")); @@ -663,7 +663,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) 
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); return 0; } @@ -687,7 +687,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); /* instructions:kp + p */ evsel = perf_evsel__next(evsel); @@ -724,7 +724,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); /* instructions + G */ evsel = perf_evsel__next(evsel); @@ -751,7 +751,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); /* instructions:G */ evsel = perf_evsel__next(evsel); @@ -777,7 +777,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); return 0; } diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 90db2a14e4bb..d0e1e821fe85 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -221,7 +221,7 @@ void perf_evlist__disable(struct perf_evlist *evlist) for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { list_for_each_entry(pos, &evlist->entries, node) { - if (perf_evsel__is_group_member(pos)) + if (!perf_evsel__is_group_leader(pos)) continue; for (thread = 0; thread < evlist->threads->nr; thread++) ioctl(FD(pos, cpu, thread), @@ -237,7 +237,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { list_for_each_entry(pos, &evlist->entries, node) { - if (perf_evsel__is_group_member(pos)) + if (!perf_evsel__is_group_leader(pos)) continue; for (thread = 0; thread < evlist->threads->nr; thread++) ioctl(FD(pos, cpu, thread), diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 7e93418a45d1..bb58b05f905f 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -520,14 +520,14 @@ void perf_evsel__config(struct perf_evsel *evsel, * Disabling only independent events or group leaders, * keeping group members enabled. 
*/ - if (!perf_evsel__is_group_member(evsel)) + if (perf_evsel__is_group_leader(evsel)) attr->disabled = 1; /* * Setting enable_on_exec for independent events and * group leaders for traced executed by perf. */ - if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel)) + if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel)) attr->enable_on_exec = 1; } @@ -708,7 +708,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) struct perf_evsel *leader = evsel->leader; int fd; - if (!perf_evsel__is_group_member(evsel)) + if (perf_evsel__is_group_leader(evsel)) return -1; /* diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index cbf6d97f0646..3f7ff471de2b 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -226,8 +226,8 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) return list_entry(evsel->node.next, struct perf_evsel, node); } -static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel) +static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel) { - return evsel->leader != evsel; + return evsel->leader == evsel; } #endif /* __PERF_EVSEL_H */ -- cgit v1.2.3-59-g8ed1b From fa283ada1606e687641dc2b157d66ef0f9c9aa8a Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 28 Nov 2012 14:52:39 +0100 Subject: perf diff: Remove displacement from struct hist_entry_diff Removing displacement from struct hist_entry_diff, because it's not used. Displacement is not used for sorting, so there's no reason to pre-calculate it. Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1354110769-2998-5-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 91ae2744e7d5..a1c0d56b6885 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -55,9 +55,6 @@ struct he_stat { struct hist_entry_diff { bool computed; - /* PERF_HPP__DISPL */ - int displacement; - /* PERF_HPP__DELTA */ double period_ratio_delta; -- cgit v1.2.3-59-g8ed1b From 05472daa4d8ab88a071bfcaa3bb47473e4071848 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 28 Nov 2012 14:52:40 +0100 Subject: perf diff: Change compute methods to work with pair directly Changing compute methods to operate over hist entry and its pair directly. This makes the code more obvious and readable, instead of all time checking for pair being != NULL. 
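A standalone sketch of the new shape of the delta computation, with fields flattened into a toy struct (the real code also caches the result in he->diff and reads totals from he->hists->stats):

  #include <stdio.h>

  struct entry {
  	unsigned long long period;	/* this entry's event count  */
  	unsigned long long total;	/* total period of its hists */
  };

  /* percentage of the whole session this entry accounts for */
  static double period_percent(const struct entry *e)
  {
  	return e->period * 100.0 / e->total;
  }

  /* delta in percentage points; the caller now guarantees pair != NULL */
  static double compute_delta(const struct entry *he, const struct entry *pair)
  {
  	return period_percent(he) - period_percent(pair);
  }

  int main(void)
  {
  	struct entry he   = { .period = 300, .total = 1000 };	/* 30% */
  	struct entry pair = { .period = 250, .total = 1000 };	/* 25% */

  	printf("%+.2f%%\n", compute_delta(&he, &pair));		/* +5.00% */
  	return 0;
  }
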
Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1354110769-2998-6-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-diff.c | 38 +++++++++++++++++--------------------- tools/perf/ui/hist.c | 40 +++++++++++++++++++++++++--------------- tools/perf/util/hist.h | 7 ++++--- 3 files changed, 46 insertions(+), 39 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 9fbbc01c5ad7..342085a18076 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -146,47 +146,40 @@ static int setup_compute(const struct option *opt, const char *str, return -EINVAL; } -static double get_period_percent(struct hist_entry *he, u64 period) +double perf_diff__period_percent(struct hist_entry *he, u64 period) { u64 total = he->hists->stats.total_period; return (period * 100.0) / total; } -double perf_diff__compute_delta(struct hist_entry *he) +double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair) { - struct hist_entry *pair = hist_entry__next_pair(he); - double new_percent = get_period_percent(he, he->stat.period); - double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0; + double new_percent = perf_diff__period_percent(he, he->stat.period); + double old_percent = perf_diff__period_percent(pair, pair->stat.period); he->diff.period_ratio_delta = new_percent - old_percent; he->diff.computed = true; return he->diff.period_ratio_delta; } -double perf_diff__compute_ratio(struct hist_entry *he) +double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair) { - struct hist_entry *pair = hist_entry__next_pair(he); double new_period = he->stat.period; - double old_period = pair ? pair->stat.period : 0; + double old_period = pair->stat.period; he->diff.computed = true; - he->diff.period_ratio = pair ? (new_period / old_period) : 0; + he->diff.period_ratio = new_period / old_period; return he->diff.period_ratio; } -s64 perf_diff__compute_wdiff(struct hist_entry *he) +s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair) { - struct hist_entry *pair = hist_entry__next_pair(he); u64 new_period = he->stat.period; - u64 old_period = pair ? 
pair->stat.period : 0; + u64 old_period = pair->stat.period; he->diff.computed = true; - - if (!pair) - he->diff.wdiff = 0; - else - he->diff.wdiff = new_period * compute_wdiff_w2 - - old_period * compute_wdiff_w1; + he->diff.wdiff = new_period * compute_wdiff_w2 - + old_period * compute_wdiff_w1; return he->diff.wdiff; } @@ -385,18 +378,21 @@ static void hists__precompute(struct hists *hists) while (next != NULL) { struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); + struct hist_entry *pair = hist_entry__next_pair(he); next = rb_next(&he->rb_node); + if (!pair) + continue; switch (compute) { case COMPUTE_DELTA: - perf_diff__compute_delta(he); + perf_diff__compute_delta(he, pair); break; case COMPUTE_RATIO: - perf_diff__compute_ratio(he); + perf_diff__compute_ratio(he, pair); break; case COMPUTE_WEIGHTED_DIFF: - perf_diff__compute_wdiff(he); + perf_diff__compute_wdiff(he, pair); break; default: BUG_ON(1); diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 6e639b506829..108e5ed67621 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -268,14 +268,18 @@ static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused) static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he) { + struct hist_entry *pair = hist_entry__next_pair(he); const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s"; char buf[32] = " "; - double diff; + double diff = 0.0; - if (he->diff.computed) - diff = he->diff.period_ratio_delta; - else - diff = perf_diff__compute_delta(he); + if (pair) { + if (he->diff.computed) + diff = he->diff.period_ratio_delta; + else + diff = perf_diff__compute_delta(he, pair); + } else + diff = perf_diff__period_percent(he, he->stat.period); if (fabs(diff) >= 0.01) scnprintf(buf, sizeof(buf), "%+4.2F%%", diff); @@ -297,14 +301,17 @@ static int hpp__width_ratio(struct perf_hpp *hpp __maybe_unused) static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he) { + struct hist_entry *pair = hist_entry__next_pair(he); const char *fmt = symbol_conf.field_sep ? "%s" : "%14s"; char buf[32] = " "; - double ratio; + double ratio = 0.0; - if (he->diff.computed) - ratio = he->diff.period_ratio; - else - ratio = perf_diff__compute_ratio(he); + if (pair) { + if (he->diff.computed) + ratio = he->diff.period_ratio; + else + ratio = perf_diff__compute_ratio(he, pair); + } if (ratio > 0.0) scnprintf(buf, sizeof(buf), "%+14.6F", ratio); @@ -326,14 +333,17 @@ static int hpp__width_wdiff(struct perf_hpp *hpp __maybe_unused) static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he) { + struct hist_entry *pair = hist_entry__next_pair(he); const char *fmt = symbol_conf.field_sep ? 
"%s" : "%14s"; char buf[32] = " "; - s64 wdiff; + s64 wdiff = 0; - if (he->diff.computed) - wdiff = he->diff.wdiff; - else - wdiff = perf_diff__compute_wdiff(he); + if (pair) { + if (he->diff.computed) + wdiff = he->diff.wdiff; + else + wdiff = perf_diff__compute_wdiff(he, pair); + } if (wdiff != 0) scnprintf(buf, sizeof(buf), "%14ld", wdiff); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index a935a60d521c..235503aac08c 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -226,8 +226,9 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, unsigned int hists__sort_list_width(struct hists *self); -double perf_diff__compute_delta(struct hist_entry *he); -double perf_diff__compute_ratio(struct hist_entry *he); -s64 perf_diff__compute_wdiff(struct hist_entry *he); +double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair); +double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair); +s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair); int perf_diff__formula(char *buf, size_t size, struct hist_entry *he); +double perf_diff__period_percent(struct hist_entry *he, u64 period); #endif /* __PERF_HIST_H */ -- cgit v1.2.3-59-g8ed1b From f4c8bae1920c459b7b9c12363d11e8a588862e42 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 28 Nov 2012 14:52:41 +0100 Subject: perf diff: Change formula methods to work with pair directly Changing formula methods to operate over hist entry and its pair directly. This makes the code more obvious and readable, instead of all time checking for pair being != NULL. Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1354110769-2998-7-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-diff.c | 35 +++++++++++++---------------------- tools/perf/ui/hist.c | 5 ++++- tools/perf/util/hist.h | 3 ++- 3 files changed, 19 insertions(+), 24 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 342085a18076..d869029fb75e 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -184,13 +184,9 @@ s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair) return he->diff.wdiff; } -static int formula_delta(struct hist_entry *he, char *buf, size_t size) +static int formula_delta(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) { - struct hist_entry *pair = hist_entry__next_pair(he); - - if (!pair) - return -1; - return scnprintf(buf, size, "(%" PRIu64 " * 100 / %" PRIu64 ") - " "(%" PRIu64 " * 100 / %" PRIu64 ")", @@ -198,41 +194,36 @@ static int formula_delta(struct hist_entry *he, char *buf, size_t size) pair->stat.period, pair->hists->stats.total_period); } -static int formula_ratio(struct hist_entry *he, char *buf, size_t size) +static int formula_ratio(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) { - struct hist_entry *pair = hist_entry__next_pair(he); double new_period = he->stat.period; - double old_period = pair ? 
pair->stat.period : 0; - - if (!pair) - return -1; + double old_period = pair->stat.period; return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period); } -static int formula_wdiff(struct hist_entry *he, char *buf, size_t size) +static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) { - struct hist_entry *pair = hist_entry__next_pair(he); u64 new_period = he->stat.period; - u64 old_period = pair ? pair->stat.period : 0; - - if (!pair) - return -1; + u64 old_period = pair->stat.period; return scnprintf(buf, size, "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")", new_period, compute_wdiff_w2, old_period, compute_wdiff_w1); } -int perf_diff__formula(char *buf, size_t size, struct hist_entry *he) +int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) { switch (compute) { case COMPUTE_DELTA: - return formula_delta(he, buf, size); + return formula_delta(he, pair, buf, size); case COMPUTE_RATIO: - return formula_ratio(he, buf, size); + return formula_ratio(he, pair, buf, size); case COMPUTE_WEIGHTED_DIFF: - return formula_wdiff(he, buf, size); + return formula_wdiff(he, pair, buf, size); default: BUG_ON(1); } diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 108e5ed67621..1785bab7adfd 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -389,10 +389,13 @@ static int hpp__width_formula(struct perf_hpp *hpp __maybe_unused) static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he) { + struct hist_entry *pair = hist_entry__next_pair(he); const char *fmt = symbol_conf.field_sep ? "%s" : "%-70s"; char buf[96] = " "; - perf_diff__formula(buf, sizeof(buf), he); + if (pair) + perf_diff__formula(he, pair, buf, sizeof(buf)); + return scnprintf(hpp->buf, hpp->size, fmt, buf); } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 235503aac08c..c1b2fade8e70 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -229,6 +229,7 @@ unsigned int hists__sort_list_width(struct hists *self); double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair); double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair); s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair); -int perf_diff__formula(char *buf, size_t size, struct hist_entry *he); +int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size); double perf_diff__period_percent(struct hist_entry *he, u64 period); #endif /* __PERF_HIST_H */ -- cgit v1.2.3-59-g8ed1b From 3b761f9bda66c24c2686ae795e263cb49f6d8ef4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 29 Nov 2012 16:34:21 -0300 Subject: perf tools: Don't check configuration on make tags Doing the same thing done in: b059dee: perf tools: Don't check configuration on make clean Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-n2ni4riphpqxw7d6ziv1ndyc@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/Makefile | 2 ++ tools/perf/Makefile | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index f6e5901c8f5f..eb30044a922a 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -223,11 +223,13 @@ install-pdf: pdf # '$(SHELL_PATH_SQ)' ./install-webdoc.sh 
$(DESTDIR)$(htmldir) ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),tags) $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE -include $(OUTPUT)PERF-VERSION-FILE endif +endif # # Determine "include::" file references in asciidoc files. diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 08da9fc616c4..2a07b957eac9 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -154,6 +154,7 @@ INSTALL = install SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),tags) -include config/feature-tests.mak ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) @@ -207,6 +208,7 @@ ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y) EXTLIBS := $(filter-out -lpthread,$(EXTLIBS)) BASIC_CFLAGS += -I. endif +endif # MAKECMDGOALS != tags endif # MAKECMDGOALS != clean # Guard against environment variables @@ -525,6 +527,7 @@ PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT) # Platform specific tweaks # ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),tags) # We choose to avoid "if .. else if .. else .. endif endif" # because maintaining the nesting to match is a pain. If @@ -841,6 +844,7 @@ ifdef ASCIIDOC8 export ASCIIDOC8 endif +endif # MAKECMDGOALS != tags endif # MAKECMDGOALS != clean # Shell quote (do not use $(call) to accommodate ancient setups); -- cgit v1.2.3-59-g8ed1b From 044c4f8fbab513af399ff8ce7a2af2d303d85849 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 29 Nov 2012 16:51:01 -0300 Subject: perf tools: Fix TUI helpline output In commit e2f4351 "perf ui/helpline: Introduce ui_helpline__vshow()" the check on the browser in use made ui_helpline__vshow() be called only for the GTK browser. The TUI one then was not used and vfprintf(stderr, ...) was used instead, making the TUI scroll the screen instead of just printing on the last line. Fix it by doing the proper check, i.e. calling ui_helpline__vshow() for both the TUI and GTK browsers. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-iad0nw09x4orhmn0uzz4ljx3@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 391c9a9e946f..399e74c34c1a 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -23,7 +23,7 @@ int eprintf(int level, const char *fmt, ...) if (verbose >= level) { va_start(args, fmt); - if (use_browser > 1) + if (use_browser >= 1) ui_helpline__vshow(fmt, args); else ret = vfprintf(stderr, fmt, args); -- cgit v1.2.3-59-g8ed1b From f0bf9107679f3670e5fbd52a934b7816256007f7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 5 Dec 2012 16:24:05 -0300 Subject: perf buildid-list: We need to check if a file is ELF first I.e.
before we try to use it as a perf.data file by calling perf_session__new, otherwise we lose the feature that shows the build id for the given ELF file, this one: [root@sandy redhat-perfdata-mtech-15]# perf buildid-list -i /root/.debug/.build-id/97/54896de655b6ac088ec2bf5113b35c06f72709 9754896de655b6ac088ec2bf5113b35c06f72709 [root@sandy redhat-perfdata-mtech-15]# perf buildid-list -i /lib/libc-2.12.so 38adaeff4f7c21899b13b28c1a2e6c199ca4c744 [root@sandy redhat-perfdata-mtech-15]# Regression introduced in: efad1415 "perf report: Accept fifos as input file" Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-3ktgyg83fwpqyfpoj0t2ezp0@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-buildid-list.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index a82d99fec83e..4c770d252fd2 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -49,18 +49,16 @@ static int perf_session__list_build_ids(bool force, bool with_hits) struct perf_session *session; symbol__elf_init(); - - session = perf_session__new(input_name, O_RDONLY, force, false, - &build_id__mark_dso_hit_ops); - if (session == NULL) - return -1; - /* * See if this is an ELF file first: */ - if (filename__fprintf_build_id(session->filename, stdout)) + if (filename__fprintf_build_id(input_name, stdout)) goto out; + session = perf_session__new(input_name, O_RDONLY, force, false, + &build_id__mark_dso_hit_ops); + if (session == NULL) + return -1; /* * in pipe-mode, the only way to get the buildids is to parse * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID @@ -69,8 +67,8 @@ static int perf_session__list_build_ids(bool force, bool with_hits) perf_session__process_events(session, &build_id__mark_dso_hit_ops); perf_session__fprintf_dsos_buildid(session, stdout, with_hits); -out: perf_session__delete(session); +out: return 0; } -- cgit v1.2.3-59-g8ed1b From db6d0bb86164497f6c9ef46020cf1881953f4b08 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 6 Dec 2012 14:22:28 +0100 Subject: perf diff: Remove displacement output option It seems not very useful, because it's possible and even more convenient to look up the related symbol by name. Also the output value for both 'baseline' and 'new' data is quite apparent from the diff output. And above all it complicates hist code factoring ;) Ditching out the PERF_HPP__DISPL column along with its related output functions. Suggested-by: Arnaldo Carvalho de Melo Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Ingo Molnar Cc: Namhyung Kim Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/20121206132228.GB1080@krava.brq.redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-diff.txt | 4 ---- tools/perf/builtin-diff.c | 29 +++++++---------------------- tools/perf/ui/hist.c | 25 ------------------------- tools/perf/util/hist.h | 1 - 4 files changed, 7 insertions(+), 52 deletions(-) diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt index 194f37d635df..5b3123d5721f 100644 --- a/tools/perf/Documentation/perf-diff.txt +++ b/tools/perf/Documentation/perf-diff.txt @@ -22,10 +22,6 @@ specified perf.data files.
OPTIONS ------- --M:: ---displacement:: - Show position displacement relative to baseline. - -D:: --dump-raw-trace:: Dump raw trace in ASCII. diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index d869029fb75e..b2e7d39f099b 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -23,7 +23,6 @@ static char const *input_old = "perf.data.old", *input_new = "perf.data"; static char diff__default_sort_order[] = "dso,symbol"; static bool force; -static bool show_displacement; static bool show_period; static bool show_formula; static bool show_baseline_only; @@ -296,9 +295,8 @@ static void insert_hist_entry_by_name(struct rb_root *root, rb_insert_color(&he->rb_node, root); } -static void hists__name_resort(struct hists *self, bool sort) +static void hists__name_resort(struct hists *self) { - unsigned long position = 1; struct rb_root tmp = RB_ROOT; struct rb_node *next = rb_first(&self->entries); @@ -306,16 +304,12 @@ static void hists__name_resort(struct hists *self, bool sort) struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - n->position = position++; - if (sort) { - rb_erase(&n->rb_node, &self->entries); - insert_hist_entry_by_name(&tmp, n); - } + rb_erase(&n->rb_node, &self->entries); + insert_hist_entry_by_name(&tmp, n); } - if (sort) - self->entries = tmp; + self->entries = tmp; } static struct perf_evsel *evsel_match(struct perf_evsel *evsel, @@ -339,12 +333,8 @@ static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name) hists__output_resort(hists); - /* - * The hists__name_resort only sets possition - * if name is false. - */ - if (name || ((!name) && show_displacement)) - hists__name_resort(hists, name); + if (name) + hists__name_resort(hists); } } @@ -549,8 +539,6 @@ static const char * const diff_usage[] = { static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('M', "displacement", &show_displacement, - "Show position displacement relative to baseline"), OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, "Show only items with match in baseline"), OPT_CALLBACK('c', "compute", &compute, @@ -585,7 +573,7 @@ static const struct option options[] = { static void ui_init(void) { /* - * Display baseline/delta/ratio/displacement/ + * Display baseline/delta/ratio * formula/periods columns. */ perf_hpp__column_enable(PERF_HPP__BASELINE); @@ -604,9 +592,6 @@ static void ui_init(void) BUG_ON(1); }; - if (show_displacement) - perf_hpp__column_enable(PERF_HPP__DISPL); - if (show_formula) perf_hpp__column_enable(PERF_HPP__FORMULA); diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 1785bab7adfd..1889c12ca81f 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -351,30 +351,6 @@ static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he) return scnprintf(hpp->buf, hpp->size, fmt, buf); } -static int hpp__header_displ(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "Displ."); -} - -static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) -{ - return 6; -} - -static int hpp__entry_displ(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hist_entry *pair = hist_entry__next_pair(he); - long displacement = pair ? pair->position - he->position : 0; - const char *fmt = symbol_conf.field_sep ? 
"%s" : "%6.6s"; - char buf[32] = " "; - - if (displacement) - scnprintf(buf, sizeof(buf), "%+4ld", displacement); - - return scnprintf(hpp->buf, hpp->size, fmt, buf); -} - static int hpp__header_formula(struct perf_hpp *hpp) { const char *fmt = symbol_conf.field_sep ? "%s" : "%70s"; @@ -427,7 +403,6 @@ struct perf_hpp_fmt perf_hpp__format[] = { HPP__PRINT_FNS(delta), HPP__PRINT_FNS(ratio), HPP__PRINT_FNS(wdiff), - HPP__PRINT_FNS(displ), HPP__PRINT_FNS(formula) }; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index c1b2fade8e70..5b3b0075be64 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -154,7 +154,6 @@ enum { PERF_HPP__DELTA, PERF_HPP__RATIO, PERF_HPP__WEIGHTED_DIFF, - PERF_HPP__DISPL, PERF_HPP__FORMULA, PERF_HPP__MAX_INDEX -- cgit v1.2.3-59-g8ed1b From 417c2ff6806fd9183cb36682dcf32c4d068aba5e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 7 Dec 2012 09:53:58 -0300 Subject: perf symbols: Generalize filter in __fprintf_buildid methods We had that 'with_hits' filter to show just the build ids for DSOs that had samples, make that generic so that we can use it in the upcoming buildid-cache --missing feature, to show just the build ids that are not in the cache. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-9nfesdfpnx7zp96yn3tmfbx0@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-buildid-list.c | 7 ++++++- tools/perf/util/dso.c | 4 ++-- tools/perf/util/dso.h | 2 +- tools/perf/util/machine.h | 8 ++++---- tools/perf/util/session.c | 6 +++--- tools/perf/util/session.h | 4 ++-- tools/perf/util/symbol.c | 12 ++++++------ 7 files changed, 24 insertions(+), 19 deletions(-) diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 4c770d252fd2..e74366a13218 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -44,6 +44,11 @@ static int filename__fprintf_build_id(const char *name, FILE *fp) return fprintf(fp, "%s\n", sbuild_id); } +static bool dso__skip_buildid(struct dso *dso, int with_hits) +{ + return with_hits && !dso->hit; +} + static int perf_session__list_build_ids(bool force, bool with_hits) { struct perf_session *session; @@ -66,7 +71,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits) if (with_hits || session->fd_pipe) perf_session__process_events(session, &build_id__mark_dso_hit_ops); - perf_session__fprintf_dsos_buildid(session, stdout, with_hits); + perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits); perf_session__delete(session); out: return 0; diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index be437850edc8..6f7d5a9d6b05 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -539,13 +539,13 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name) } size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, - bool with_hits) + bool (skip)(struct dso *dso, int parm), int parm) { struct dso *pos; size_t ret = 0; list_for_each_entry(pos, head, node) { - if (with_hits && !pos->hit) + if (skip && skip(pos, parm)) continue; ret += dso__fprintf_buildid(pos, fp); ret += fprintf(fp, " %s\n", pos->long_name); diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index e03276940b99..450199ab51b5 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -138,7 +138,7 @@ struct dso *__dsos__findnew(struct 
list_head *head, const char *name); bool __dsos__read_build_ids(struct list_head *head, bool with_hits); size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, - bool with_hits); + bool (skip)(struct dso *dso, int parm), int parm); size_t __dsos__fprintf(struct list_head *head, FILE *fp); size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index b7cde7467d55..646ad13005d3 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -129,11 +129,11 @@ int machine__load_kallsyms(struct machine *machine, const char *filename, int machine__load_vmlinux_path(struct machine *machine, enum map_type type, symbol_filter_t filter); -size_t machine__fprintf_dsos_buildid(struct machine *machine, - FILE *fp, bool with_hits); +size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits); +size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); void machine__destroy_kernel_maps(struct machine *machine); int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index aa5e58255cba..8d04dfbc205c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1552,10 +1552,10 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) } size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, - bool with_hits) + bool (skip)(struct dso *dso, int parm), int parm) { - size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); - return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); + size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm); + return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); } size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index c18fabdff19b..8c2302504199 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -115,8 +115,8 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp); size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); -size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, - FILE *fp, bool with_hits); +size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, + bool (fn)(struct dso *dso, int parm), int parm); size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 295f8d4feedf..e5ba9840ac22 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1315,21 +1315,21 @@ size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) } size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, - bool with_hits) + bool (skip)(struct dso *dso, int parm), int parm) { - return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + - __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); + return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) + + __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); } -size_t 
machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits) +size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) { struct rb_node *nd; size_t ret = 0; for (nd = rb_first(machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); + ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); } return ret; } -- cgit v1.2.3-59-g8ed1b From fbb6976c2f7a6ab2c4d8511181d686f5f2aaf476 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 7 Dec 2012 16:28:27 -0300 Subject: perf buildid-cache: Add option to show build ids that are missing in the cache This will allow connecting to services being put in place by distros such as Fedora, where one can retrieve DSOs by their build-id. Example usage: for buildid in $(perf buildid-cache --missing perf.data | cut -d' ' -f1) ; do echo "trying to get $buildid" wget -q https://darkserver.fedoraproject.org/buildids/$buildid cat $buildid ; echo rm -f $buildid done Now it's just a matter of some porcelain to get the details provided by such a service, retrieve the file and use 'perf buildid-cache --add $FILE' to insert it in the cache, then use 'perf report' or 'annotate', which will find the required files in the cache. More information about the darkserver service at: https://darkserver.fedoraproject.org/ Cc: David Ahern Cc: Frank Eigler Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Kushal Das Cc: Mark Wielaard Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-6fuktuiyjn4jykxmt7c9f7xq@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-buildid-cache.txt | 3 ++ tools/perf/builtin-buildid-cache.c | 48 +++++++++++++++++++++++-- 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt index c1057701a7dc..8e798baae0fd 100644 --- a/tools/perf/Documentation/perf-buildid-cache.txt +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -24,6 +24,9 @@ OPTIONS -r:: --remove=:: Remove specified file from the cache. +-M:: +--missing=:: + List missing build ids in the cache for the specified file. -v:: --verbose:: Be more verbose.
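Before the implementation hunk that follows, it may help to see the shape of the filtering that the "Generalize filter in __fprintf_buildid methods" patch above set up and that --missing plugs into. This is a self-contained sketch under simplifying assumptions: a flat array instead of perf's dso lists, a bool cached flag instead of the real build-id re-read, and hypothetical names (print_buildids, skip_cached); only the bool (skip)(struct dso *dso, int parm) callback signature follows the real code.

#include <stdio.h>
#include <stdbool.h>

/* Cut-down dso: just the fields the two policies inspect. */
struct dso {
	const char *name;
	bool hit;	/* dso->hit: the DSO had samples */
	bool cached;	/* stand-in for "a good copy sits in ~/.debug" */
};

/* Generalized walker in the spirit of __dsos__fprintf_buildid():
 * the policy lives entirely in the skip callback. */
static void print_buildids(struct dso *dsos, int n, FILE *fp,
			   bool (*skip)(struct dso *dso, int parm), int parm)
{
	int i;

	for (i = 0; i < n; i++) {
		if (skip && skip(&dsos[i], parm))
			continue;
		fprintf(fp, "%s\n", dsos[i].name);
	}
}

/* buildid-list policy, as in dso__skip_buildid(): hide sample-less DSOs. */
static bool skip_buildid(struct dso *dso, int with_hits)
{
	return with_hits && !dso->hit;
}

/* buildid-cache --missing policy: hide what the cache already has, so
 * only the missing build ids get printed. */
static bool skip_cached(struct dso *dso, int parm)
{
	(void)parm;	/* unused; kept for the common signature */
	return dso->cached;
}

int main(void)
{
	struct dso dsos[] = {
		{ "/lib/libc-2.12.so",	true,	true  },
		{ "/usr/bin/perf",	true,	false },
	};

	print_buildids(dsos, 2, stdout, skip_buildid, 1);	/* both lines */
	print_buildids(dsos, 2, stdout, skip_cached, 0);	/* perf only */
	return 0;
}

The real dso__missing_buildid_cache() in the next hunk does the cached check for real: it recomputes the cache filename and re-reads the build id from that file.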
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index fae8b250b2ca..a336014e0286 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -14,6 +14,7 @@ #include "util/parse-options.h" #include "util/strlist.h" #include "util/build-id.h" +#include "util/session.h" #include "util/symbol.h" static int build_id_cache__add_file(const char *filename, const char *debugdir) @@ -58,19 +59,59 @@ static int build_id_cache__remove_file(const char *filename, return err; } +static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) +{ + char filename[PATH_MAX]; + u8 build_id[BUILD_ID_SIZE]; + + if (dso__build_id_filename(dso, filename, sizeof(filename)) && + filename__read_build_id(filename, build_id, + sizeof(build_id)) != sizeof(build_id)) { + if (errno == ENOENT) + return false; + + pr_warning("Problems with %s file, consider removing it from the cache\n", + filename); + } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) { + pr_warning("Problems with %s file, consider removing it from the cache\n", + filename); + } + + return true; +} + +static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp) +{ + struct perf_session *session = perf_session__new(filename, O_RDONLY, + force, false, NULL); + if (session == NULL) + return -1; + + perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0); + perf_session__delete(session); + + return 0; +} + int cmd_buildid_cache(int argc, const char **argv, const char *prefix __maybe_unused) { struct strlist *list; struct str_node *pos; + int ret = 0; + bool force = false; char debugdir[PATH_MAX]; char const *add_name_list_str = NULL, - *remove_name_list_str = NULL; + *remove_name_list_str = NULL, + *missing_filename = NULL; const struct option buildid_cache_options[] = { OPT_STRING('a', "add", &add_name_list_str, "file list", "file(s) to add"), OPT_STRING('r', "remove", &remove_name_list_str, "file list", "file(s) to remove"), + OPT_STRING('M', "missing", &missing_filename, "file", + "to find missing build ids in the cache"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_INCR('v', "verbose", &verbose, "be more verbose"), OPT_END() }; @@ -125,5 +166,8 @@ int cmd_buildid_cache(int argc, const char **argv, } } - return 0; + if (missing_filename) + ret = build_id_cache__fprintf_missing(missing_filename, force, stdout); + + return ret; } -- cgit v1.2.3-59-g8ed1b From 3f067dcab711c2df7eefcfc5b3aa9a0e2b5f7d42 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 7 Dec 2012 17:39:39 -0300 Subject: perf machine: Move more machine methods to machine.c Mechanical, no functional changes. 
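Backing up to the buildid-cache hunk above for a moment: dso__missing_buildid_cache() is a skip callback, so its return values read inverted at first sight; true suppresses a DSO (cached, or at least not cleanly missing) and false prints it as missing. Below is a condensed restatement of that decision logic, with a hypothetical read_build_id() stub standing in for the real filename__read_build_id() ELF parsing, and assuming the cache path has already been computed.

#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#define BUILD_ID_SIZE 20	/* SHA-1 build ids, as in the perf sources */

/* Stub standing in for filename__read_build_id(); it always reports
 * ENOENT here so the example in main() takes the "missing" path. */
static int read_build_id(const char *filename, unsigned char *bid, size_t size)
{
	(void)filename; (void)bid; (void)size;
	errno = ENOENT;
	return -1;
}

/* true = skip (do not list as missing), false = list as missing. */
static bool skip_because_cached(const char *cached_file,
				const unsigned char *dso_build_id)
{
	unsigned char on_disk[BUILD_ID_SIZE];

	if (read_build_id(cached_file, on_disk, sizeof(on_disk)) != BUILD_ID_SIZE) {
		if (errno == ENOENT)
			return false;	/* no cached copy at all: report it */
		return true;	/* unreadable entry: the real code warns here */
	}

	if (memcmp(dso_build_id, on_disk, BUILD_ID_SIZE))
		return true;	/* stale copy: the real code warns and
				 * suggests removing it from the cache */

	return true;	/* good cached copy: nothing to fetch */
}

int main(void)
{
	unsigned char wanted[BUILD_ID_SIZE] = { 0 };

	/* Dummy path; the stub ignores it and fails with ENOENT. */
	printf("%s\n", skip_because_cached("/root/.debug/dummy", wanted) ?
	       "skip" : "missing");	/* prints: missing */
	return 0;
}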
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-9ib6qtqge1jmms2luwu4udbx@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 742 ++++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/machine.h | 3 +- tools/perf/util/session.c | 210 ------------- tools/perf/util/symbol.c | 522 +------------------------------- tools/perf/util/symbol.h | 4 + tools/perf/util/thread.c | 20 +- tools/perf/util/thread.h | 1 + 7 files changed, 756 insertions(+), 746 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 1f09d0581e6b..71fa90391fe4 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1,10 +1,15 @@ +#include "callchain.h" #include "debug.h" #include "event.h" +#include "evsel.h" +#include "hist.h" #include "machine.h" #include "map.h" +#include "sort.h" #include "strlist.h" #include "thread.h" #include +#include "unwind.h" int machine__init(struct machine *machine, const char *root_dir, pid_t pid) { @@ -48,6 +53,29 @@ static void dsos__delete(struct list_head *dsos) } } +void machine__delete_dead_threads(struct machine *machine) +{ + struct thread *n, *t; + + list_for_each_entry_safe(t, n, &machine->dead_threads, node) { + list_del(&t->node); + thread__delete(t); + } +} + +void machine__delete_threads(struct machine *machine) +{ + struct rb_node *nd = rb_first(&machine->threads); + + while (nd) { + struct thread *t = rb_entry(nd, struct thread, rb_node); + + rb_erase(&t->rb_node, &machine->threads); + nd = rb_next(nd); + thread__delete(t); + } +} + void machine__exit(struct machine *machine) { map_groups__exit(&machine->kmaps); @@ -264,6 +292,534 @@ int machine__process_lost_event(struct machine *machine __maybe_unused, return 0; } +struct map *machine__new_module(struct machine *machine, u64 start, + const char *filename) +{ + struct map *map; + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); + + if (dso == NULL) + return NULL; + + map = map__new2(start, dso, MAP__FUNCTION); + if (map == NULL) + return NULL; + + if (machine__is_host(machine)) + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; + else + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; + map_groups__insert(&machine->kmaps, map); + return map; +} + +size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) +{ + struct rb_node *nd; + size_t ret = 0; + + for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret += __dsos__fprintf(&pos->kernel_dsos, fp); + ret += __dsos__fprintf(&pos->user_dsos, fp); + } + + return ret; +} + +size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) + + __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); +} + +size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + struct rb_node *nd; + size_t ret = 0; + + for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); + } + return ret; +} + +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) +{ + int i; + size_t printed = 0; + struct dso *kdso = 
machine->vmlinux_maps[MAP__FUNCTION]->dso; + + if (kdso->has_build_id) { + char filename[PATH_MAX]; + if (dso__build_id_filename(kdso, filename, sizeof(filename))) + printed += fprintf(fp, "[0] %s\n", filename); + } + + for (i = 0; i < vmlinux_path__nr_entries; ++i) + printed += fprintf(fp, "[%d] %s\n", + i + kdso->has_build_id, vmlinux_path[i]); + + return printed; +} + +size_t machine__fprintf(struct machine *machine, FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, struct thread, rb_node); + + ret += thread__fprintf(pos, fp); + } + + return ret; +} + +static struct dso *machine__get_kernel(struct machine *machine) +{ + const char *vmlinux_name = NULL; + struct dso *kernel; + + if (machine__is_host(machine)) { + vmlinux_name = symbol_conf.vmlinux_name; + if (!vmlinux_name) + vmlinux_name = "[kernel.kallsyms]"; + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[kernel]", + DSO_TYPE_KERNEL); + } else { + char bf[PATH_MAX]; + + if (machine__is_default_guest(machine)) + vmlinux_name = symbol_conf.default_guest_vmlinux_name; + if (!vmlinux_name) + vmlinux_name = machine__mmap_name(machine, bf, + sizeof(bf)); + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[guest.kernel]", + DSO_TYPE_GUEST_KERNEL); + } + + if (kernel != NULL && (!kernel->has_build_id)) + dso__read_running_kernel_build_id(kernel, machine); + + return kernel; +} + +struct process_args { + u64 start; +}; + +static int symbol__in_kernel(void *arg, const char *name, + char type __maybe_unused, u64 start) +{ + struct process_args *args = arg; + + if (strchr(name, '[')) + return 0; + + args->start = start; + return 1; +} + +/* Figure out the start address of kernel map from /proc/kallsyms */ +static u64 machine__get_kernel_start_addr(struct machine *machine) +{ + const char *filename; + char path[PATH_MAX]; + struct process_args args; + + if (machine__is_host(machine)) { + filename = "/proc/kallsyms"; + } else { + if (machine__is_default_guest(machine)) + filename = (char *)symbol_conf.default_guest_kallsyms; + else { + sprintf(path, "%s/proc/kallsyms", machine->root_dir); + filename = path; + } + } + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return 0; + + if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) + return 0; + + return args.start; +} + +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) +{ + enum map_type type; + u64 start = machine__get_kernel_start_addr(machine); + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + + machine->vmlinux_maps[type] = map__new2(start, kernel, type); + if (machine->vmlinux_maps[type] == NULL) + return -1; + + machine->vmlinux_maps[type]->map_ip = + machine->vmlinux_maps[type]->unmap_ip = + identity__map_ip; + kmap = map__kmap(machine->vmlinux_maps[type]); + kmap->kmaps = &machine->kmaps; + map_groups__insert(&machine->kmaps, + machine->vmlinux_maps[type]); + } + + return 0; +} + +void machine__destroy_kernel_maps(struct machine *machine) +{ + enum map_type type; + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + + if (machine->vmlinux_maps[type] == NULL) + continue; + + kmap = map__kmap(machine->vmlinux_maps[type]); + map_groups__remove(&machine->kmaps, + machine->vmlinux_maps[type]); + if (kmap->ref_reloc_sym) { + /* + * ref_reloc_sym is shared among all maps, so free just + * on one of them. 
+ */ + if (type == MAP__FUNCTION) { + free((char *)kmap->ref_reloc_sym->name); + kmap->ref_reloc_sym->name = NULL; + free(kmap->ref_reloc_sym); + } + kmap->ref_reloc_sym = NULL; + } + + map__delete(machine->vmlinux_maps[type]); + machine->vmlinux_maps[type] = NULL; + } +} + +int machines__create_guest_kernel_maps(struct rb_root *machines) +{ + int ret = 0; + struct dirent **namelist = NULL; + int i, items = 0; + char path[PATH_MAX]; + pid_t pid; + char *endp; + + if (symbol_conf.default_guest_vmlinux_name || + symbol_conf.default_guest_modules || + symbol_conf.default_guest_kallsyms) { + machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); + } + + if (symbol_conf.guestmount) { + items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); + if (items <= 0) + return -ENOENT; + for (i = 0; i < items; i++) { + if (!isdigit(namelist[i]->d_name[0])) { + /* Filter out . and .. */ + continue; + } + pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); + if ((*endp != '\0') || + (endp == namelist[i]->d_name) || + (errno == ERANGE)) { + pr_debug("invalid directory (%s). Skipping.\n", + namelist[i]->d_name); + continue; + } + sprintf(path, "%s/%s/proc/kallsyms", + symbol_conf.guestmount, + namelist[i]->d_name); + ret = access(path, R_OK); + if (ret) { + pr_debug("Can't access file %s\n", path); + goto failure; + } + machines__create_kernel_maps(machines, pid); + } +failure: + free(namelist); + } + + return ret; +} + +void machines__destroy_guest_kernel_maps(struct rb_root *machines) +{ + struct rb_node *next = rb_first(machines); + + while (next) { + struct machine *pos = rb_entry(next, struct machine, rb_node); + + next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, machines); + machine__delete(pos); + } +} + +int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) +{ + struct machine *machine = machines__findnew(machines, pid); + + if (machine == NULL) + return -1; + + return machine__create_kernel_maps(machine); +} + +int machine__load_kallsyms(struct machine *machine, const char *filename, + enum map_type type, symbol_filter_t filter) +{ + struct map *map = machine->vmlinux_maps[type]; + int ret = dso__load_kallsyms(map->dso, filename, map, filter); + + if (ret > 0) { + dso__set_loaded(map->dso, type); + /* + * Since /proc/kallsyms will have multiple sessions for the + * kernel, with modules between them, fixup the end of all + * sections. 
+ */ + __map_groups__fixup_end(&machine->kmaps, type); + } + + return ret; +} + +int machine__load_vmlinux_path(struct machine *machine, enum map_type type, + symbol_filter_t filter) +{ + struct map *map = machine->vmlinux_maps[type]; + int ret = dso__load_vmlinux_path(map->dso, map, filter); + + if (ret > 0) { + dso__set_loaded(map->dso, type); + map__reloc_vmlinux(map); + } + + return ret; +} + +static void map_groups__fixup_end(struct map_groups *mg) +{ + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + __map_groups__fixup_end(mg, i); +} + +static char *get_kernel_version(const char *root_dir) +{ + char version[PATH_MAX]; + FILE *file; + char *name, *tmp; + const char *prefix = "Linux version "; + + sprintf(version, "%s/proc/version", root_dir); + file = fopen(version, "r"); + if (!file) + return NULL; + + version[0] = '\0'; + tmp = fgets(version, sizeof(version), file); + fclose(file); + + name = strstr(version, prefix); + if (!name) + return NULL; + name += strlen(prefix); + tmp = strchr(name, ' '); + if (tmp) + *tmp = '\0'; + + return strdup(name); +} + +static int map_groups__set_modules_path_dir(struct map_groups *mg, + const char *dir_name) +{ + struct dirent *dent; + DIR *dir = opendir(dir_name); + int ret = 0; + + if (!dir) { + pr_debug("%s: cannot open %s dir\n", __func__, dir_name); + return -1; + } + + while ((dent = readdir(dir)) != NULL) { + char path[PATH_MAX]; + struct stat st; + + /*sshfs might return bad dent->d_type, so we have to stat*/ + snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); + if (stat(path, &st)) + continue; + + if (S_ISDIR(st.st_mode)) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + ret = map_groups__set_modules_path_dir(mg, path); + if (ret < 0) + goto out; + } else { + char *dot = strrchr(dent->d_name, '.'), + dso_name[PATH_MAX]; + struct map *map; + char *long_name; + + if (dot == NULL || strcmp(dot, ".ko")) + continue; + snprintf(dso_name, sizeof(dso_name), "[%.*s]", + (int)(dot - dent->d_name), dent->d_name); + + strxfrchar(dso_name, '-', '_'); + map = map_groups__find_by_name(mg, MAP__FUNCTION, + dso_name); + if (map == NULL) + continue; + + long_name = strdup(path); + if (long_name == NULL) { + ret = -1; + goto out; + } + dso__set_long_name(map->dso, long_name); + map->dso->lname_alloc = 1; + dso__kernel_module_get_build_id(map->dso, ""); + } + } + +out: + closedir(dir); + return ret; +} + +static int machine__set_modules_path(struct machine *machine) +{ + char *version; + char modules_path[PATH_MAX]; + + version = get_kernel_version(machine->root_dir); + if (!version) + return -1; + + snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", + machine->root_dir, version); + free(version); + + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); +} + +static int machine__create_modules(struct machine *machine) +{ + char *line = NULL; + size_t n; + FILE *file; + struct map *map; + const char *modules; + char path[PATH_MAX]; + + if (machine__is_default_guest(machine)) + modules = symbol_conf.default_guest_modules; + else { + sprintf(path, "%s/proc/modules", machine->root_dir); + modules = path; + } + + if (symbol__restricted_filename(path, "/proc/modules")) + return -1; + + file = fopen(modules, "r"); + if (file == NULL) + return -1; + + while (!feof(file)) { + char name[PATH_MAX]; + u64 start; + char *sep; + int line_len; + + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + 
sep = strrchr(line, 'x'); + if (sep == NULL) + continue; + + hex2u64(sep + 1, &start); + + sep = strchr(line, ' '); + if (sep == NULL) + continue; + + *sep = '\0'; + + snprintf(name, sizeof(name), "[%s]", line); + map = machine__new_module(machine, start, name); + if (map == NULL) + goto out_delete_line; + dso__kernel_module_get_build_id(map->dso, machine->root_dir); + } + + free(line); + fclose(file); + + return machine__set_modules_path(machine); + +out_delete_line: + free(line); +out_failure: + return -1; +} + +int machine__create_kernel_maps(struct machine *machine) +{ + struct dso *kernel = machine__get_kernel(machine); + + if (kernel == NULL || + __machine__create_kernel_maps(machine, kernel) < 0) + return -1; + + if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { + if (machine__is_host(machine)) + pr_debug("Problems creating module maps, " + "continuing anyway...\n"); + else + pr_debug("Problems creating module maps for guest %d, " + "continuing anyway...\n", machine->pid); + } + + /* + * Now that we have all the maps created, just set the ->end of them: + */ + map_groups__fixup_end(&machine->kmaps); + return 0; +} + static void machine__set_kernel_mmap_len(struct machine *machine, union perf_event *event) { @@ -462,3 +1018,189 @@ int machine__process_event(struct machine *machine, union perf_event *event) return ret; } + +void machine__remove_thread(struct machine *machine, struct thread *th) +{ + machine->last_match = NULL; + rb_erase(&th->rb_node, &machine->threads); + /* + * We may have references to this thread, for instance in some hist_entry + * instances, so just move them to a separate list. + */ + list_add_tail(&th->node, &machine->dead_threads); +} + +static bool symbol__match_parent_regex(struct symbol *sym) +{ + if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) + return 1; + + return 0; +} + +static const u8 cpumodes[] = { + PERF_RECORD_MISC_USER, + PERF_RECORD_MISC_KERNEL, + PERF_RECORD_MISC_GUEST_USER, + PERF_RECORD_MISC_GUEST_KERNEL +}; +#define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) + +static void ip__resolve_ams(struct machine *machine, struct thread *thread, + struct addr_map_symbol *ams, + u64 ip) +{ + struct addr_location al; + size_t i; + u8 m; + + memset(&al, 0, sizeof(al)); + + for (i = 0; i < NCPUMODES; i++) { + m = cpumodes[i]; + /* + * We cannot use the header.misc hint to determine whether a + * branch stack address is user, kernel, guest, hypervisor. + * Branches may straddle the kernel/user/hypervisor boundaries. 
+ * Thus, we have to try consecutively until we find a match + * or else, the symbol is unknown + */ + thread__find_addr_location(thread, machine, m, MAP__FUNCTION, + ip, &al, NULL); + if (al.sym) + goto found; + } +found: + ams->addr = ip; + ams->al_addr = al.addr; + ams->sym = al.sym; + ams->map = al.map; +} + +struct branch_info *machine__resolve_bstack(struct machine *machine, + struct thread *thr, + struct branch_stack *bs) +{ + struct branch_info *bi; + unsigned int i; + + bi = calloc(bs->nr, sizeof(struct branch_info)); + if (!bi) + return NULL; + + for (i = 0; i < bs->nr; i++) { + ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to); + ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from); + bi[i].flags = bs->entries[i].flags; + } + return bi; +} + +static int machine__resolve_callchain_sample(struct machine *machine, + struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent) + +{ + u8 cpumode = PERF_RECORD_MISC_USER; + unsigned int i; + int err; + + callchain_cursor_reset(&callchain_cursor); + + if (chain->nr > PERF_MAX_STACK_DEPTH) { + pr_warning("corrupted callchain. skipping...\n"); + return 0; + } + + for (i = 0; i < chain->nr; i++) { + u64 ip; + struct addr_location al; + + if (callchain_param.order == ORDER_CALLEE) + ip = chain->ips[i]; + else + ip = chain->ips[chain->nr - i - 1]; + + if (ip >= PERF_CONTEXT_MAX) { + switch (ip) { + case PERF_CONTEXT_HV: + cpumode = PERF_RECORD_MISC_HYPERVISOR; + break; + case PERF_CONTEXT_KERNEL: + cpumode = PERF_RECORD_MISC_KERNEL; + break; + case PERF_CONTEXT_USER: + cpumode = PERF_RECORD_MISC_USER; + break; + default: + pr_debug("invalid callchain context: " + "%"PRId64"\n", (s64) ip); + /* + * It seems the callchain is corrupted. + * Discard all. + */ + callchain_cursor_reset(&callchain_cursor); + return 0; + } + continue; + } + + al.filtered = false; + thread__find_addr_location(thread, machine, cpumode, + MAP__FUNCTION, ip, &al, NULL); + if (al.sym != NULL) { + if (sort__has_parent && !*parent && + symbol__match_parent_regex(al.sym)) + *parent = al.sym; + if (!symbol_conf.use_callchain) + break; + } + + err = callchain_cursor_append(&callchain_cursor, + ip, al.map, al.sym); + if (err) + return err; + } + + return 0; +} + +static int unwind_entry(struct unwind_entry *entry, void *arg) +{ + struct callchain_cursor *cursor = arg; + return callchain_cursor_append(cursor, entry->ip, + entry->map, entry->sym); +} + +int machine__resolve_callchain(struct machine *machine, + struct perf_evsel *evsel, + struct thread *thread, + struct perf_sample *sample, + struct symbol **parent) + +{ + int ret; + + callchain_cursor_reset(&callchain_cursor); + + ret = machine__resolve_callchain_sample(machine, thread, + sample->callchain, parent); + if (ret) + return ret; + + /* Can we do dwarf post unwind? */ + if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && + (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) + return 0; + + /* Bail out if nothing was captured. 
*/ + if ((!sample->user_regs.regs) || + (!sample->user_stack.size)) + return 0; + + return unwind__get_entries(unwind_entry, &callchain_cursor, machine, + thread, evsel->attr.sample_regs_user, + sample); + +} diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 646ad13005d3..e11236878ec1 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -61,9 +61,10 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size); int machine__init(struct machine *machine, const char *root_dir, pid_t pid); void machine__exit(struct machine *machine); +void machine__delete_dead_threads(struct machine *machine); +void machine__delete_threads(struct machine *machine); void machine__delete(struct machine *machine); - struct branch_info *machine__resolve_bstack(struct machine *machine, struct thread *thread, struct branch_stack *bs); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8d04dfbc205c..76d6e257b8a4 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -16,7 +16,6 @@ #include "cpumap.h" #include "event-parse.h" #include "perf_regs.h" -#include "unwind.h" #include "vdso.h" static int perf_session__open(struct perf_session *self, bool force) @@ -162,34 +161,11 @@ out_delete: return NULL; } -static void machine__delete_dead_threads(struct machine *machine) -{ - struct thread *n, *t; - - list_for_each_entry_safe(t, n, &machine->dead_threads, node) { - list_del(&t->node); - thread__delete(t); - } -} - static void perf_session__delete_dead_threads(struct perf_session *session) { machine__delete_dead_threads(&session->host_machine); } -static void machine__delete_threads(struct machine *self) -{ - struct rb_node *nd = rb_first(&self->threads); - - while (nd) { - struct thread *t = rb_entry(nd, struct thread, rb_node); - - rb_erase(&t->rb_node, &self->threads); - nd = rb_next(nd); - thread__delete(t); - } -} - static void perf_session__delete_threads(struct perf_session *session) { machine__delete_threads(&session->host_machine); @@ -223,192 +199,6 @@ void perf_session__delete(struct perf_session *self) vdso__exit(); } -void machine__remove_thread(struct machine *self, struct thread *th) -{ - self->last_match = NULL; - rb_erase(&th->rb_node, &self->threads); - /* - * We may have references to this thread, for instance in some hist_entry - * instances, so just move them to a separate list. - */ - list_add_tail(&th->node, &self->dead_threads); -} - -static bool symbol__match_parent_regex(struct symbol *sym) -{ - if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) - return 1; - - return 0; -} - -static const u8 cpumodes[] = { - PERF_RECORD_MISC_USER, - PERF_RECORD_MISC_KERNEL, - PERF_RECORD_MISC_GUEST_USER, - PERF_RECORD_MISC_GUEST_KERNEL -}; -#define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) - -static void ip__resolve_ams(struct machine *self, struct thread *thread, - struct addr_map_symbol *ams, - u64 ip) -{ - struct addr_location al; - size_t i; - u8 m; - - memset(&al, 0, sizeof(al)); - - for (i = 0; i < NCPUMODES; i++) { - m = cpumodes[i]; - /* - * We cannot use the header.misc hint to determine whether a - * branch stack address is user, kernel, guest, hypervisor. - * Branches may straddle the kernel/user/hypervisor boundaries. 
- * Thus, we have to try consecutively until we find a match - * or else, the symbol is unknown - */ - thread__find_addr_location(thread, self, m, MAP__FUNCTION, - ip, &al, NULL); - if (al.sym) - goto found; - } -found: - ams->addr = ip; - ams->al_addr = al.addr; - ams->sym = al.sym; - ams->map = al.map; -} - -struct branch_info *machine__resolve_bstack(struct machine *self, - struct thread *thr, - struct branch_stack *bs) -{ - struct branch_info *bi; - unsigned int i; - - bi = calloc(bs->nr, sizeof(struct branch_info)); - if (!bi) - return NULL; - - for (i = 0; i < bs->nr; i++) { - ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to); - ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from); - bi[i].flags = bs->entries[i].flags; - } - return bi; -} - -static int machine__resolve_callchain_sample(struct machine *machine, - struct thread *thread, - struct ip_callchain *chain, - struct symbol **parent) - -{ - u8 cpumode = PERF_RECORD_MISC_USER; - unsigned int i; - int err; - - callchain_cursor_reset(&callchain_cursor); - - if (chain->nr > PERF_MAX_STACK_DEPTH) { - pr_warning("corrupted callchain. skipping...\n"); - return 0; - } - - for (i = 0; i < chain->nr; i++) { - u64 ip; - struct addr_location al; - - if (callchain_param.order == ORDER_CALLEE) - ip = chain->ips[i]; - else - ip = chain->ips[chain->nr - i - 1]; - - if (ip >= PERF_CONTEXT_MAX) { - switch (ip) { - case PERF_CONTEXT_HV: - cpumode = PERF_RECORD_MISC_HYPERVISOR; - break; - case PERF_CONTEXT_KERNEL: - cpumode = PERF_RECORD_MISC_KERNEL; - break; - case PERF_CONTEXT_USER: - cpumode = PERF_RECORD_MISC_USER; - break; - default: - pr_debug("invalid callchain context: " - "%"PRId64"\n", (s64) ip); - /* - * It seems the callchain is corrupted. - * Discard all. - */ - callchain_cursor_reset(&callchain_cursor); - return 0; - } - continue; - } - - al.filtered = false; - thread__find_addr_location(thread, machine, cpumode, - MAP__FUNCTION, ip, &al, NULL); - if (al.sym != NULL) { - if (sort__has_parent && !*parent && - symbol__match_parent_regex(al.sym)) - *parent = al.sym; - if (!symbol_conf.use_callchain) - break; - } - - err = callchain_cursor_append(&callchain_cursor, - ip, al.map, al.sym); - if (err) - return err; - } - - return 0; -} - -static int unwind_entry(struct unwind_entry *entry, void *arg) -{ - struct callchain_cursor *cursor = arg; - return callchain_cursor_append(cursor, entry->ip, - entry->map, entry->sym); -} - -int machine__resolve_callchain(struct machine *machine, - struct perf_evsel *evsel, - struct thread *thread, - struct perf_sample *sample, - struct symbol **parent) - -{ - int ret; - - callchain_cursor_reset(&callchain_cursor); - - ret = machine__resolve_callchain_sample(machine, thread, - sample->callchain, parent); - if (ret) - return ret; - - /* Can we do dwarf post unwind? */ - if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && - (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) - return 0; - - /* Bail out if nothing was captured. 
*/ - if ((!sample->user_regs.regs) || - (!sample->user_stack.size)) - return 0; - - return unwind__get_entries(unwind_entry, &callchain_cursor, machine, - thread, evsel->attr.sample_regs_user, - sample); - -} - static int process_event_synth_tracing_data_stub(union perf_event *event __maybe_unused, struct perf_session *session diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e5ba9840ac22..2960284d6ae1 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -28,8 +28,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); -static int vmlinux_path__nr_entries; -static char **vmlinux_path; +int vmlinux_path__nr_entries; +char **vmlinux_path; struct symbol_conf symbol_conf = { .exclude_other = true, @@ -202,13 +202,6 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) curr->end = ~0ULL; } -static void map_groups__fixup_end(struct map_groups *mg) -{ - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - __map_groups__fixup_end(mg, i); -} - struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) { size_t namelen = strlen(name) + 1; @@ -652,8 +645,8 @@ discard_symbol: rb_erase(&pos->rb_node, root); return count + moved; } -static bool symbol__restricted_filename(const char *filename, - const char *restricted_filename) +bool symbol__restricted_filename(const char *filename, + const char *restricted_filename) { bool restricted = false; @@ -887,200 +880,6 @@ struct map *map_groups__find_by_name(struct map_groups *mg, return NULL; } -static int map_groups__set_modules_path_dir(struct map_groups *mg, - const char *dir_name) -{ - struct dirent *dent; - DIR *dir = opendir(dir_name); - int ret = 0; - - if (!dir) { - pr_debug("%s: cannot open %s dir\n", __func__, dir_name); - return -1; - } - - while ((dent = readdir(dir)) != NULL) { - char path[PATH_MAX]; - struct stat st; - - /*sshfs might return bad dent->d_type, so we have to stat*/ - snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); - if (stat(path, &st)) - continue; - - if (S_ISDIR(st.st_mode)) { - if (!strcmp(dent->d_name, ".") || - !strcmp(dent->d_name, "..")) - continue; - - ret = map_groups__set_modules_path_dir(mg, path); - if (ret < 0) - goto out; - } else { - char *dot = strrchr(dent->d_name, '.'), - dso_name[PATH_MAX]; - struct map *map; - char *long_name; - - if (dot == NULL || strcmp(dot, ".ko")) - continue; - snprintf(dso_name, sizeof(dso_name), "[%.*s]", - (int)(dot - dent->d_name), dent->d_name); - - strxfrchar(dso_name, '-', '_'); - map = map_groups__find_by_name(mg, MAP__FUNCTION, - dso_name); - if (map == NULL) - continue; - - long_name = strdup(path); - if (long_name == NULL) { - ret = -1; - goto out; - } - dso__set_long_name(map->dso, long_name); - map->dso->lname_alloc = 1; - dso__kernel_module_get_build_id(map->dso, ""); - } - } - -out: - closedir(dir); - return ret; -} - -static char *get_kernel_version(const char *root_dir) -{ - char version[PATH_MAX]; - FILE *file; - char *name, *tmp; - const char *prefix = "Linux version "; - - sprintf(version, "%s/proc/version", root_dir); - file = fopen(version, "r"); - if (!file) - return NULL; - - version[0] = '\0'; - tmp = fgets(version, sizeof(version), file); - fclose(file); - - name = strstr(version, prefix); - if (!name) - return NULL; - name += strlen(prefix); - tmp = strchr(name, ' '); - if (tmp) - *tmp = '\0'; - - return strdup(name); -} - -static int 
machine__set_modules_path(struct machine *machine) -{ - char *version; - char modules_path[PATH_MAX]; - - version = get_kernel_version(machine->root_dir); - if (!version) - return -1; - - snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", - machine->root_dir, version); - free(version); - - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); -} - -struct map *machine__new_module(struct machine *machine, u64 start, - const char *filename) -{ - struct map *map; - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); - - if (dso == NULL) - return NULL; - - map = map__new2(start, dso, MAP__FUNCTION); - if (map == NULL) - return NULL; - - if (machine__is_host(machine)) - dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; - else - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; - map_groups__insert(&machine->kmaps, map); - return map; -} - -static int machine__create_modules(struct machine *machine) -{ - char *line = NULL; - size_t n; - FILE *file; - struct map *map; - const char *modules; - char path[PATH_MAX]; - - if (machine__is_default_guest(machine)) - modules = symbol_conf.default_guest_modules; - else { - sprintf(path, "%s/proc/modules", machine->root_dir); - modules = path; - } - - if (symbol__restricted_filename(path, "/proc/modules")) - return -1; - - file = fopen(modules, "r"); - if (file == NULL) - return -1; - - while (!feof(file)) { - char name[PATH_MAX]; - u64 start; - char *sep; - int line_len; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - goto out_failure; - - line[--line_len] = '\0'; /* \n */ - - sep = strrchr(line, 'x'); - if (sep == NULL) - continue; - - hex2u64(sep + 1, &start); - - sep = strchr(line, ' '); - if (sep == NULL) - continue; - - *sep = '\0'; - - snprintf(name, sizeof(name), "[%s]", line); - map = machine__new_module(machine, start, name); - if (map == NULL) - goto out_delete_line; - dso__kernel_module_get_build_id(map->dso, machine->root_dir); - } - - free(line); - fclose(file); - - return machine__set_modules_path(machine); - -out_delete_line: - free(line); -out_failure: - return -1; -} - int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, symbol_filter_t filter) { @@ -1300,195 +1099,6 @@ out_try_fixup: return err; } -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) -{ - struct rb_node *nd; - size_t ret = 0; - - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += __dsos__fprintf(&pos->kernel_dsos, fp); - ret += __dsos__fprintf(&pos->user_dsos, fp); - } - - return ret; -} - -size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, - bool (skip)(struct dso *dso, int parm), int parm) -{ - return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) + - __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); -} - -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, - bool (skip)(struct dso *dso, int parm), int parm) -{ - struct rb_node *nd; - size_t ret = 0; - - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); - } - return ret; -} - -static struct dso *machine__get_kernel(struct machine *machine) -{ - const char *vmlinux_name = NULL; - struct dso *kernel; - - if (machine__is_host(machine)) { - vmlinux_name = symbol_conf.vmlinux_name; - if (!vmlinux_name) - vmlinux_name = 
"[kernel.kallsyms]"; - - kernel = dso__kernel_findnew(machine, vmlinux_name, - "[kernel]", - DSO_TYPE_KERNEL); - } else { - char bf[PATH_MAX]; - - if (machine__is_default_guest(machine)) - vmlinux_name = symbol_conf.default_guest_vmlinux_name; - if (!vmlinux_name) - vmlinux_name = machine__mmap_name(machine, bf, - sizeof(bf)); - - kernel = dso__kernel_findnew(machine, vmlinux_name, - "[guest.kernel]", - DSO_TYPE_GUEST_KERNEL); - } - - if (kernel != NULL && (!kernel->has_build_id)) - dso__read_running_kernel_build_id(kernel, machine); - - return kernel; -} - -struct process_args { - u64 start; -}; - -static int symbol__in_kernel(void *arg, const char *name, - char type __maybe_unused, u64 start) -{ - struct process_args *args = arg; - - if (strchr(name, '[')) - return 0; - - args->start = start; - return 1; -} - -/* Figure out the start address of kernel map from /proc/kallsyms */ -static u64 machine__get_kernel_start_addr(struct machine *machine) -{ - const char *filename; - char path[PATH_MAX]; - struct process_args args; - - if (machine__is_host(machine)) { - filename = "/proc/kallsyms"; - } else { - if (machine__is_default_guest(machine)) - filename = (char *)symbol_conf.default_guest_kallsyms; - else { - sprintf(path, "%s/proc/kallsyms", machine->root_dir); - filename = path; - } - } - - if (symbol__restricted_filename(filename, "/proc/kallsyms")) - return 0; - - if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) - return 0; - - return args.start; -} - -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) -{ - enum map_type type; - u64 start = machine__get_kernel_start_addr(machine); - - for (type = 0; type < MAP__NR_TYPES; ++type) { - struct kmap *kmap; - - machine->vmlinux_maps[type] = map__new2(start, kernel, type); - if (machine->vmlinux_maps[type] == NULL) - return -1; - - machine->vmlinux_maps[type]->map_ip = - machine->vmlinux_maps[type]->unmap_ip = - identity__map_ip; - kmap = map__kmap(machine->vmlinux_maps[type]); - kmap->kmaps = &machine->kmaps; - map_groups__insert(&machine->kmaps, - machine->vmlinux_maps[type]); - } - - return 0; -} - -void machine__destroy_kernel_maps(struct machine *machine) -{ - enum map_type type; - - for (type = 0; type < MAP__NR_TYPES; ++type) { - struct kmap *kmap; - - if (machine->vmlinux_maps[type] == NULL) - continue; - - kmap = map__kmap(machine->vmlinux_maps[type]); - map_groups__remove(&machine->kmaps, - machine->vmlinux_maps[type]); - if (kmap->ref_reloc_sym) { - /* - * ref_reloc_sym is shared among all maps, so free just - * on one of them. 
- */ - if (type == MAP__FUNCTION) { - free((char *)kmap->ref_reloc_sym->name); - kmap->ref_reloc_sym->name = NULL; - free(kmap->ref_reloc_sym); - } - kmap->ref_reloc_sym = NULL; - } - - map__delete(machine->vmlinux_maps[type]); - machine->vmlinux_maps[type] = NULL; - } -} - -int machine__create_kernel_maps(struct machine *machine) -{ - struct dso *kernel = machine__get_kernel(machine); - - if (kernel == NULL || - __machine__create_kernel_maps(machine, kernel) < 0) - return -1; - - if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { - if (machine__is_host(machine)) - pr_debug("Problems creating module maps, " - "continuing anyway...\n"); - else - pr_debug("Problems creating module maps for guest %d, " - "continuing anyway...\n", machine->pid); - } - - /* - * Now that we have all the maps created, just set the ->end of them: - */ - map_groups__fixup_end(&machine->kmaps); - return 0; -} - static void vmlinux_path__exit(void) { while (--vmlinux_path__nr_entries >= 0) { @@ -1549,25 +1159,6 @@ out_fail: return -1; } -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) -{ - int i; - size_t printed = 0; - struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; - - if (kdso->has_build_id) { - char filename[PATH_MAX]; - if (dso__build_id_filename(kdso, filename, sizeof(filename))) - printed += fprintf(fp, "[0] %s\n", filename); - } - - for (i = 0; i < vmlinux_path__nr_entries; ++i) - printed += fprintf(fp, "[%d] %s\n", - i + kdso->has_build_id, vmlinux_path[i]); - - return printed; -} - static int setup_list(struct strlist **list, const char *list_str, const char *list_name) { @@ -1671,108 +1262,3 @@ void symbol__exit(void) symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; symbol_conf.initialized = false; } - -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) -{ - struct machine *machine = machines__findnew(machines, pid); - - if (machine == NULL) - return -1; - - return machine__create_kernel_maps(machine); -} - -int machines__create_guest_kernel_maps(struct rb_root *machines) -{ - int ret = 0; - struct dirent **namelist = NULL; - int i, items = 0; - char path[PATH_MAX]; - pid_t pid; - char *endp; - - if (symbol_conf.default_guest_vmlinux_name || - symbol_conf.default_guest_modules || - symbol_conf.default_guest_kallsyms) { - machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); - } - - if (symbol_conf.guestmount) { - items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); - if (items <= 0) - return -ENOENT; - for (i = 0; i < items; i++) { - if (!isdigit(namelist[i]->d_name[0])) { - /* Filter out . and .. */ - continue; - } - pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); - if ((*endp != '\0') || - (endp == namelist[i]->d_name) || - (errno == ERANGE)) { - pr_debug("invalid directory (%s). 
Skipping.\n", - namelist[i]->d_name); - continue; - } - sprintf(path, "%s/%s/proc/kallsyms", - symbol_conf.guestmount, - namelist[i]->d_name); - ret = access(path, R_OK); - if (ret) { - pr_debug("Can't access file %s\n", path); - goto failure; - } - machines__create_kernel_maps(machines, pid); - } -failure: - free(namelist); - } - - return ret; -} - -void machines__destroy_guest_kernel_maps(struct rb_root *machines) -{ - struct rb_node *next = rb_first(machines); - - while (next) { - struct machine *pos = rb_entry(next, struct machine, rb_node); - - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, machines); - machine__delete(pos); - } -} - -int machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type, symbol_filter_t filter) -{ - struct map *map = machine->vmlinux_maps[type]; - int ret = dso__load_kallsyms(map->dso, filename, map, filter); - - if (ret > 0) { - dso__set_loaded(map->dso, type); - /* - * Since /proc/kallsyms will have multiple sessions for the - * kernel, with modules between them, fixup the end of all - * sections. - */ - __map_groups__fixup_end(&machine->kmaps, type); - } - - return ret; -} - -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, - symbol_filter_t filter) -{ - struct map *map = machine->vmlinux_maps[type]; - int ret = dso__load_vmlinux_path(map->dso, map, filter); - - if (ret > 0) { - dso__set_loaded(map->dso, type); - map__reloc_vmlinux(map); - } - - return ret; -} diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index de68f98b236d..ec7b2405c377 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -120,6 +120,8 @@ struct symbol_conf { }; extern struct symbol_conf symbol_conf; +extern int vmlinux_path__nr_entries; +extern char **vmlinux_path; static inline void *symbol__priv(struct symbol *sym) { @@ -223,6 +225,8 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym, size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); size_t symbol__fprintf(struct symbol *sym, FILE *fp); bool symbol_type__is_a(char symbol_type, enum map_type map_type); +bool symbol__restricted_filename(const char *filename, + const char *restricted_filename); int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, symbol_filter_t filter, diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index df59623ac763..632e40e5ceca 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -54,10 +54,10 @@ int thread__comm_len(struct thread *self) return self->comm_len; } -static size_t thread__fprintf(struct thread *self, FILE *fp) +size_t thread__fprintf(struct thread *thread, FILE *fp) { - return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) + - map_groups__fprintf(&self->mg, verbose, fp); + return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) + + map_groups__fprintf(&thread->mg, verbose, fp); } void thread__insert_map(struct thread *self, struct map *map) @@ -84,17 +84,3 @@ int thread__fork(struct thread *self, struct thread *parent) return -ENOMEM; return 0; } - -size_t machine__fprintf(struct machine *machine, FILE *fp) -{ - size_t ret = 0; - struct rb_node *nd; - - for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread, rb_node); - - ret += thread__fprintf(pos, fp); - } - - return ret; -} diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index f2fa17caa7d5..5ad266403098 100644 --- a/tools/perf/util/thread.h 
+++ b/tools/perf/util/thread.h @@ -30,6 +30,7 @@ int thread__set_comm(struct thread *self, const char *comm); int thread__comm_len(struct thread *self); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); +size_t thread__fprintf(struct thread *thread, FILE *fp); static inline struct map *thread__find_map(struct thread *self, enum map_type type, u64 addr) -- cgit v1.2.3-59-g8ed1b From 7be5ebe8767eaa482e18f566de5f56c1519abf59 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 14:53:43 -0300 Subject: perf evsel: Update sample_size when setting sample_type bits We use evsel->sample_size to detect underflows in perf_evsel__parse_sample, but we were failing to update it after perf_evsel__init(), i.e. when we decide, after creating an evsel, that we want some extra field bit set. Fix it by introducing methods to set a bit that will take care of correctly adjusting evsel->sample_size. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-2ny5pzsing0dcth7hws48x9c@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 2 +- tools/perf/builtin-top.c | 11 ++++++----- tools/perf/tests/perf-record.c | 6 +++--- tools/perf/util/evlist.c | 2 +- tools/perf/util/evsel.c | 45 ++++++++++++++++++++++++++++++------------ tools/perf/util/evsel.h | 11 +++++++++++ 6 files changed, 54 insertions(+), 23 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f3151d3c70ce..0be6605db9ea 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -286,7 +286,7 @@ try_again: */ opts->sample_id_all_missing = true; if (!opts->sample_time && !opts->raw_samples && !time_needed) - attr->sample_type &= ~PERF_SAMPLE_TIME; + perf_evsel__reset_sample_bit(pos, TIME); goto retry_sample_id; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 987e1b8a9c2e..31a7c51aac76 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -901,24 +901,25 @@ static void perf_top__start_counters(struct perf_top *top) list_for_each_entry(counter, &evlist->entries, node) { struct perf_event_attr *attr = &counter->attr; - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; + perf_evsel__set_sample_bit(counter, IP); + perf_evsel__set_sample_bit(counter, TID); if (top->freq) { - attr->sample_type |= PERF_SAMPLE_PERIOD; + perf_evsel__set_sample_bit(counter, PERIOD); attr->freq = 1; attr->sample_freq = top->freq; } if (evlist->nr_entries > 1) { - attr->sample_type |= PERF_SAMPLE_ID; + perf_evsel__set_sample_bit(counter, ID); attr->read_format |= PERF_FORMAT_ID; } if (perf_target__has_cpu(&top->target)) - attr->sample_type |= PERF_SAMPLE_CPU; + perf_evsel__set_sample_bit(counter, CPU); if (symbol_conf.use_callchain) - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; + perf_evsel__set_sample_bit(counter, CALLCHAIN); attr->mmap = 1; attr->comm = 1; diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index 70e0d4421df8..5902772492b6 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -103,9 +103,9 @@ int test__PERF_RECORD(void) * Config the evsels, setting attr->comm on the first one, etc. 
*/ evsel = perf_evlist__first(evlist); - evsel->attr.sample_type |= PERF_SAMPLE_CPU; - evsel->attr.sample_type |= PERF_SAMPLE_TID; - evsel->attr.sample_type |= PERF_SAMPLE_TIME; + perf_evsel__set_sample_bit(evsel, CPU); + perf_evsel__set_sample_bit(evsel, TID); + perf_evsel__set_sample_bit(evsel, TIME); perf_evlist__config_attrs(evlist, &opts); err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index d0e1e821fe85..265565942ae5 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -61,7 +61,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist, perf_evsel__config(evsel, opts); if (evlist->nr_entries > 1) - evsel->attr.sample_type |= PERF_SAMPLE_ID; + perf_evsel__set_sample_bit(evsel, ID); } } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index bb58b05f905f..fc80f5a32fa6 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -50,6 +50,24 @@ void hists__init(struct hists *hists) pthread_mutex_init(&hists->lock, NULL); } +void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit) +{ + if (!(evsel->attr.sample_type & bit)) { + evsel->attr.sample_type |= bit; + evsel->sample_size += sizeof(u64); + } +} + +void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit) +{ + if (evsel->attr.sample_type & bit) { + evsel->attr.sample_type &= ~bit; + evsel->sample_size -= sizeof(u64); + } +} + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx) { @@ -445,7 +463,8 @@ void perf_evsel__config(struct perf_evsel *evsel, PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID; - attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; + perf_evsel__set_sample_bit(evsel, IP); + perf_evsel__set_sample_bit(evsel, TID); /* * We default some events to a 1 default interval. 
But keep @@ -454,7 +473,7 @@ void perf_evsel__config(struct perf_evsel *evsel, if (!attr->sample_period || (opts->user_freq != UINT_MAX && opts->user_interval != ULLONG_MAX)) { if (opts->freq) { - attr->sample_type |= PERF_SAMPLE_PERIOD; + perf_evsel__set_sample_bit(evsel, PERIOD); attr->freq = 1; attr->sample_freq = opts->freq; } else { @@ -469,16 +488,16 @@ void perf_evsel__config(struct perf_evsel *evsel, attr->inherit_stat = 1; if (opts->sample_address) { - attr->sample_type |= PERF_SAMPLE_ADDR; + perf_evsel__set_sample_bit(evsel, ADDR); attr->mmap_data = track; } if (opts->call_graph) { - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; + perf_evsel__set_sample_bit(evsel, CALLCHAIN); if (opts->call_graph == CALLCHAIN_DWARF) { - attr->sample_type |= PERF_SAMPLE_REGS_USER | - PERF_SAMPLE_STACK_USER; + perf_evsel__set_sample_bit(evsel, REGS_USER); + perf_evsel__set_sample_bit(evsel, STACK_USER); attr->sample_regs_user = PERF_REGS_MASK; attr->sample_stack_user = opts->stack_dump_size; attr->exclude_callchain_user = 1; @@ -486,20 +505,20 @@ void perf_evsel__config(struct perf_evsel *evsel, } if (perf_target__has_cpu(&opts->target)) - attr->sample_type |= PERF_SAMPLE_CPU; + perf_evsel__set_sample_bit(evsel, CPU); if (opts->period) - attr->sample_type |= PERF_SAMPLE_PERIOD; + perf_evsel__set_sample_bit(evsel, PERIOD); if (!opts->sample_id_all_missing && (opts->sample_time || !opts->no_inherit || perf_target__has_cpu(&opts->target))) - attr->sample_type |= PERF_SAMPLE_TIME; + perf_evsel__set_sample_bit(evsel, TIME); if (opts->raw_samples) { - attr->sample_type |= PERF_SAMPLE_TIME; - attr->sample_type |= PERF_SAMPLE_RAW; - attr->sample_type |= PERF_SAMPLE_CPU; + perf_evsel__set_sample_bit(evsel, TIME); + perf_evsel__set_sample_bit(evsel, RAW); + perf_evsel__set_sample_bit(evsel, CPU); } if (opts->no_delay) { @@ -507,7 +526,7 @@ void perf_evsel__config(struct perf_evsel *evsel, attr->wakeup_events = 1; } if (opts->branch_stack) { - attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; + perf_evsel__set_sample_bit(evsel, BRANCH_STACK); attr->branch_sample_type = opts->branch_stack; } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 3f7ff471de2b..739853969243 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -118,6 +118,17 @@ void perf_evsel__free_fd(struct perf_evsel *evsel); void perf_evsel__free_id(struct perf_evsel *evsel); void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); +void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit); +void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit); + +#define perf_evsel__set_sample_bit(evsel, bit) \ + __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit) + +#define perf_evsel__reset_sample_bit(evsel, bit) \ + __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) + int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, const char *filter); -- cgit v1.2.3-59-g8ed1b From c5d3d50da2e32441d65d63ac27a67197998668c5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 14:58:42 -0300 Subject: perf test: Fixup error reporting in basic mmap test In two cases this test could detect an error, bail out but return zero. Fix it by reporting -1 for failure. 
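In other words, the test now follows the usual perf test error idiom, sketched here (simplified from the diff below): preset the error code before any check that can fail, and clear it only once every check has passed:

	err = -1;	/* assume failure from here on */
	evsel = perf_evlist__id2evsel(evlist, sample.id);
	if (evsel == NULL)	/* bail out: err is still -1 */
		goto out_munmap;
	/* ... remaining checks ... */
	err = 0;	/* reached only when all checks passed */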
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-tjhs9v6nqpofmxv3gs5lnu2c@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/mmap-basic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index e1746811e14b..8682ecfb4f66 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -128,6 +128,7 @@ int test__basic_mmap(void) goto out_munmap; } + err = -1; evsel = perf_evlist__id2evsel(evlist, sample.id); if (evsel == NULL) { pr_debug("event with id %" PRIu64 @@ -137,16 +138,17 @@ int test__basic_mmap(void) nr_events[evsel->idx]++; } + err = 0; list_for_each_entry(evsel, &evlist->entries, node) { if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { pr_debug("expected %d %s events, got %d\n", expected_nr_events[evsel->idx], perf_evsel__name(evsel), nr_events[evsel->idx]); + err = -1; goto out_munmap; } } - err = 0; out_munmap: perf_evlist__munmap(evlist); out_close_fd: -- cgit v1.2.3-59-g8ed1b From a60d79535c21dca4e24d08abf8ab56e2f860af71 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 15:11:43 -0300 Subject: perf test: Use perf_evsel__newtp constructor in the tracepoint tests Removing one trace_event__id function, not used anymore. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-13p2ov2rg166y73j9uazukma@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 1 - tools/perf/tests/mmap-basic.c | 37 ++++++++++---------------------- tools/perf/tests/open-syscall-all-cpus.c | 18 +++------------- tools/perf/tests/open-syscall.c | 17 +++------------ tools/perf/tests/tests.h | 3 --- tools/perf/tests/util.c | 30 -------------------------- 6 files changed, 17 insertions(+), 89 deletions(-) delete mode 100644 tools/perf/tests/util.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2a07b957eac9..10aa24c74a28 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -487,7 +487,6 @@ LIB_OBJS += $(OUTPUT)tests/rdpmc.o LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o LIB_OBJS += $(OUTPUT)tests/pmu.o -LIB_OBJS += $(OUTPUT)tests/util.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 8682ecfb4f66..4743b6d7a09a 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -22,36 +22,16 @@ int test__basic_mmap(void) struct thread_map *threads; struct cpu_map *cpus; struct perf_evlist *evlist; - struct perf_event_attr attr = { - .type = PERF_TYPE_TRACEPOINT, - .read_format = PERF_FORMAT_ID, - .sample_type = PERF_SAMPLE_ID, - .watermark = 0, - }; cpu_set_t cpu_set; const char *syscall_names[] = { "getsid", "getppid", "getpgrp", "getpgid", }; pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, (void*)getpgid }; #define nsyscalls ARRAY_SIZE(syscall_names) - int ids[nsyscalls]; unsigned int nr_events[nsyscalls], expected_nr_events[nsyscalls], i, j; struct perf_evsel *evsels[nsyscalls], *evsel; - for (i = 0; i < nsyscalls; ++i) { - char name[64]; - - snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); - 
ids[i] = trace_event__id(name); - if (ids[i] < 0) { - pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); - return -1; - } - nr_events[i] = 0; - expected_nr_events[i] = random() % 257; - } - threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); @@ -79,18 +59,20 @@ int test__basic_mmap(void) goto out_free_cpus; } - /* anonymous union fields, can't be initialized above */ - attr.wakeup_events = 1; - attr.sample_period = 1; - for (i = 0; i < nsyscalls; ++i) { - attr.config = ids[i]; - evsels[i] = perf_evsel__new(&attr, i); + char name[64]; + + snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); + evsels[i] = perf_evsel__newtp("syscalls", name, i); if (evsels[i] == NULL) { pr_debug("perf_evsel__new\n"); goto out_free_evlist; } + evsels[i]->attr.wakeup_events = 1; + evsels[i]->attr.read_format |= PERF_FORMAT_ID; + perf_evsel__set_sample_bit(evsels[i], ID); + perf_evlist__add(evlist, evsels[i]); if (perf_evsel__open(evsels[i], cpus, threads) < 0) { @@ -99,6 +81,9 @@ int test__basic_mmap(void) strerror(errno)); goto out_close_fd; } + + nr_events[i] = 0; + expected_nr_events[i] = 1 + rand() % 127; } if (perf_evlist__mmap(evlist, 128, true) < 0) { diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c index 31072aba0d54..9b920a0cce79 100644 --- a/tools/perf/tests/open-syscall-all-cpus.c +++ b/tools/perf/tests/open-syscall-all-cpus.c @@ -7,20 +7,12 @@ int test__open_syscall_event_on_all_cpus(void) { int err = -1, fd, cpu; - struct thread_map *threads; struct cpu_map *cpus; struct perf_evsel *evsel; - struct perf_event_attr attr; unsigned int nr_open_calls = 111, i; cpu_set_t cpu_set; - int id = trace_event__id("sys_enter_open"); + struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); - if (id < 0) { - pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); - return -1; - } - - threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); return -1; @@ -32,15 +24,11 @@ int test__open_syscall_event_on_all_cpus(void) goto out_thread_map_delete; } - CPU_ZERO(&cpu_set); - memset(&attr, 0, sizeof(attr)); - attr.type = PERF_TYPE_TRACEPOINT; - attr.config = id; - evsel = perf_evsel__new(&attr, 0); + evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); if (evsel == NULL) { - pr_debug("perf_evsel__new\n"); + pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); goto out_thread_map_delete; } diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c index 98be8b518b4f..befc0671f95d 100644 --- a/tools/perf/tests/open-syscall.c +++ b/tools/perf/tests/open-syscall.c @@ -6,29 +6,18 @@ int test__open_syscall_event(void) { int err = -1, fd; - struct thread_map *threads; struct perf_evsel *evsel; - struct perf_event_attr attr; unsigned int nr_open_calls = 111, i; - int id = trace_event__id("sys_enter_open"); + struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); - if (id < 0) { - pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); - return -1; - } - - threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); return -1; } - memset(&attr, 0, sizeof(attr)); - attr.type = PERF_TYPE_TRACEPOINT; - attr.config = id; - evsel = perf_evsel__new(&attr, 0); + evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); if (evsel == NULL) { - pr_debug("perf_evsel__new\n"); + pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); goto out_thread_map_delete; } diff --git 
a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index fc121edab016..0fd94657960e 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -16,7 +16,4 @@ int test__attr(void); int test__dso_data(void); int test__parse_events(void); -/* Util */ -int trace_event__id(const char *evname); - #endif /* TESTS_H */ diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c deleted file mode 100644 index 748f2e8f6961..000000000000 --- a/tools/perf/tests/util.c +++ /dev/null @@ -1,30 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include "tests.h" -#include "debugfs.h" - -int trace_event__id(const char *evname) -{ - char *filename; - int err = -1, fd; - - if (asprintf(&filename, - "%s/syscalls/%s/id", - tracing_events_path, evname) < 0) - return -1; - - fd = open(filename, O_RDONLY); - if (fd >= 0) { - char id[16]; - if (read(fd, id, sizeof(id)) > 0) - err = atoi(id); - close(fd); - } - - free(filename); - return err; -} -- cgit v1.2.3-59-g8ed1b From 7a5a5ca5fe3df8636c96b49591c23baf7c415dd4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 15:21:30 -0300 Subject: perf evsel: Introduce method to request IDs be used When mmaping multiple events we need to find the right evsel that matches an event in the ring buffer. For that we need to set the PERF_FORMAT_ID bit in perf_event_attr.read_format so that when we read the event fds we get that id to then hash it and be able later to use perf_evlist__id2evsel to find the right evsel. We also need to set the PERF_SAMPLE_ID bit in perf_event_attr.sample_type to ask for that id to be stashed in each sample, so that we can demux it. So add a perf_evsel__set_sample_id() method to do those two things in one operation. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-1z4xcmbud30lamklfe80oopu@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 6 ++---- tools/perf/tests/mmap-basic.c | 3 +-- tools/perf/util/evsel.c | 6 ++++++ tools/perf/util/evsel.h | 2 ++ 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 31a7c51aac76..a30647487ba3 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -910,10 +910,8 @@ static void perf_top__start_counters(struct perf_top *top) attr->sample_freq = top->freq; } - if (evlist->nr_entries > 1) { - perf_evsel__set_sample_bit(counter, ID); - attr->read_format |= PERF_FORMAT_ID; - } + if (evlist->nr_entries > 1) + perf_evsel__set_sample_id(counter); if (perf_target__has_cpu(&top->target)) perf_evsel__set_sample_bit(counter, CPU); diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 4743b6d7a09a..cdd50755af51 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -70,8 +70,7 @@ int test__basic_mmap(void) } evsels[i]->attr.wakeup_events = 1; - evsels[i]->attr.read_format |= PERF_FORMAT_ID; - perf_evsel__set_sample_bit(evsels[i], ID); + perf_evsel__set_sample_id(evsels[i]); perf_evlist__add(evlist, evsels[i]); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index fc80f5a32fa6..9dde7e933be9 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -68,6 +68,12 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, } } +void perf_evsel__set_sample_id(struct perf_evsel *evsel) +{ + perf_evsel__set_sample_bit(evsel, ID); + 
evsel->attr.read_format |= PERF_FORMAT_ID; +} + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx) { diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 739853969243..5c089775064f 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -129,6 +129,8 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, #define perf_evsel__reset_sample_bit(evsel, bit) \ __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) +void perf_evsel__set_sample_id(struct perf_evsel *evsel); + int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, const char *filter); -- cgit v1.2.3-59-g8ed1b From 3a5afaec593954a399967155db47f72cb8828a6a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 15:29:12 -0300 Subject: perf evsel: No need to always ask for PERF_FORMAT_ID in read_format Instead, make perf_evlist__config_attrs() use perf_evsel__set_sample_id() when there is more than one event; that way we ask for the event ids to be returned when reading the file descriptors only if we actually have multiple events. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-xuho5hrrxy2ky0cjpr80hyfp@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr/base-record | 2 +- tools/perf/tests/attr/test-record-group | 2 ++ tools/perf/tests/attr/test-record-group1 | 2 ++ tools/perf/util/evlist.c | 2 +- tools/perf/util/evsel.c | 3 +-- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record index f1485d8e6a0b..f9d04643109e 100644 --- a/tools/perf/tests/attr/base-record +++ b/tools/perf/tests/attr/base-record @@ -7,7 +7,7 @@ size=96 config=0 sample_period=4000 sample_type=263 -read_format=7 +read_format=3 disabled=1 inherit=1 pinned=0 diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group index a6599e9a19d3..0df34cab3e42 100644 --- a/tools/perf/tests/attr/test-record-group +++ b/tools/perf/tests/attr/test-record-group @@ -6,12 +6,14 @@ args = --group -e cycles,instructions kill >/dev/null 2>&1 fd=1 group_fd=-1 sample_type=327 +read_format=7 [event-2:base-record] fd=2 group_fd=1 config=1 sample_type=327 +read_format=7 mmap=0 comm=0 enable_on_exec=0 diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1 index 5a8359da38af..18bd926ff6cb 100644 --- a/tools/perf/tests/attr/test-record-group1 +++ b/tools/perf/tests/attr/test-record-group1 @@ -6,6 +6,7 @@ args = -e '{cycles,instructions}' kill >/tmp/krava 2>&1 fd=1 group_fd=-1 sample_type=327 +read_format=7 [event-2:base-record] fd=2 @@ -13,6 +14,7 @@ group_fd=1 type=0 config=1 sample_type=327 +read_format=7 mmap=0 comm=0 enable_on_exec=0 diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 265565942ae5..5a0878d3a038 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -61,7 +61,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist, perf_evsel__config(evsel, opts); if (evlist->nr_entries > 1) - perf_evsel__set_sample_bit(evsel, ID); + perf_evsel__set_sample_id(evsel); } } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 9dde7e933be9..e62d3aef9cd6 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -466,8 +466,7 @@ void perf_evsel__config(struct perf_evsel *evsel, attr->sample_id_all =
opts->sample_id_all_missing ? 0 : 1; attr->inherit = !opts->no_inherit; attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING | - PERF_FORMAT_ID; + PERF_FORMAT_TOTAL_TIME_RUNNING; perf_evsel__set_sample_bit(evsel, IP); perf_evsel__set_sample_bit(evsel, TID); -- cgit v1.2.3-59-g8ed1b From 62b807f64948f55776b1cc57298966d915b1a402 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 16:04:40 -0300 Subject: perf evsel: No need to always ask for PERF_FORMAT_TOTAL_TIME_{ENABLED,RUNNING} This is needed, so far, just in 'perf stat', to scale counters, so don't unconditionally ask for them in the perf_evsel__config() method. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-ujpujgscq2f2oodxuso5nobc@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr/base-record | 2 +- tools/perf/tests/attr/test-record-group | 4 ++-- tools/perf/tests/attr/test-record-group1 | 4 ++-- tools/perf/util/evsel.c | 2 -- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record index f9d04643109e..5bc3880f7be5 100644 --- a/tools/perf/tests/attr/base-record +++ b/tools/perf/tests/attr/base-record @@ -7,7 +7,7 @@ size=96 config=0 sample_period=4000 sample_type=263 -read_format=3 +read_format=0 disabled=1 inherit=1 pinned=0 diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group index 0df34cab3e42..57739cacdb2a 100644 --- a/tools/perf/tests/attr/test-record-group +++ b/tools/perf/tests/attr/test-record-group @@ -6,14 +6,14 @@ args = --group -e cycles,instructions kill >/dev/null 2>&1 fd=1 group_fd=-1 sample_type=327 -read_format=7 +read_format=4 [event-2:base-record] fd=2 group_fd=1 config=1 sample_type=327 -read_format=7 +read_format=4 mmap=0 comm=0 enable_on_exec=0 diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1 index 18bd926ff6cb..4d688a13fd2f 100644 --- a/tools/perf/tests/attr/test-record-group1 +++ b/tools/perf/tests/attr/test-record-group1 @@ -6,7 +6,7 @@ args = -e '{cycles,instructions}' kill >/tmp/krava 2>&1 fd=1 group_fd=-1 sample_type=327 -read_format=7 +read_format=4 [event-2:base-record] fd=2 @@ -14,7 +14,7 @@ group_fd=1 type=0 config=1 sample_type=327 -read_format=7 +read_format=4 mmap=0 comm=0 enable_on_exec=0 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e62d3aef9cd6..2492d329a5f5 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -465,8 +465,6 @@ void perf_evsel__config(struct perf_evsel *evsel, attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; attr->inherit = !opts->no_inherit; - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING; perf_evsel__set_sample_bit(evsel, IP); perf_evsel__set_sample_bit(evsel, TID); -- cgit v1.2.3-59-g8ed1b From f77a951826b44b763e4d9fbd2479b6132d2bd7fc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 16:41:31 -0300 Subject: perf evlist: Set the leader in the perf_evlist__config method Since we need to ensure the leader is set before configuring the evsel perf_event_attrs. Reducing the boilerplate needed by tools, helping, for instance, 'perf trace', that wasn't setting the leader. 
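For tool code the change boils down to replacing two ordered calls with one, roughly (a sketch based on the builtin-record.c hunk below):

	/* before: each tool had to remember the ordering itself */
	if (opts->group)
		perf_evlist__set_leader(evlist);
	perf_evlist__config_attrs(evlist, opts);

	/* after: a single call that does both, in the right order */
	perf_evlist__config(evlist, opts);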
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-22shm0ptkch2kgl7rtqlligx@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 9 +-------- tools/perf/builtin-trace.c | 2 +- tools/perf/tests/perf-record.c | 2 +- tools/perf/util/evlist.c | 10 ++++++++-- tools/perf/util/evlist.h | 4 ++-- 5 files changed, 13 insertions(+), 14 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 0be6605db9ea..fc4f08044632 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -230,14 +230,7 @@ static int perf_record__open(struct perf_record *rec) struct perf_record_opts *opts = &rec->opts; int rc = 0; - /* - * Set the evsel leader links before we configure attributes, - * since some might depend on this info. - */ - if (opts->group) - perf_evlist__set_leader(evlist); - - perf_evlist__config_attrs(evlist, opts); + perf_evlist__config(evlist, opts); list_for_each_entry(pos, &evlist->entries, node) { struct perf_event_attr *attr = &pos->attr; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 7932ffa29889..d222d7fc7e96 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -455,7 +455,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv) goto out_delete_evlist; } - perf_evlist__config_attrs(evlist, &trace->opts); + perf_evlist__config(evlist, &trace->opts); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index 5902772492b6..6ea66cf6791b 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -106,7 +106,7 @@ int test__PERF_RECORD(void) perf_evsel__set_sample_bit(evsel, CPU); perf_evsel__set_sample_bit(evsel, TID); perf_evsel__set_sample_bit(evsel, TIME); - perf_evlist__config_attrs(evlist, &opts); + perf_evlist__config(evlist, &opts); err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); if (err < 0) { diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 5a0878d3a038..dc8aee97a488 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -49,10 +49,16 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, return evlist; } -void perf_evlist__config_attrs(struct perf_evlist *evlist, - struct perf_record_opts *opts) +void perf_evlist__config(struct perf_evlist *evlist, + struct perf_record_opts *opts) { struct perf_evsel *evsel; + /* + * Set the evsel leader links before we configure attributes, + * since some might depend on this info. 
+ */ + if (opts->group) + perf_evlist__set_leader(evlist); if (evlist->cpus->map[0] < 0) opts->no_inherit = true; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 56003f779e60..457e2350d21d 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -76,8 +76,8 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); int perf_evlist__open(struct perf_evlist *evlist); -void perf_evlist__config_attrs(struct perf_evlist *evlist, - struct perf_record_opts *opts); +void perf_evlist__config(struct perf_evlist *evlist, + struct perf_record_opts *opts); int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct perf_record_opts *opts, -- cgit v1.2.3-59-g8ed1b From 0698aeddcfe0c2514af1d012082665a3bb55d01b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 10 Dec 2012 18:17:08 -0300 Subject: perf evsel: Adopt fprintf routine from 'perf evlist' So that we can print all the details when debugging other tools, when we have just evlists and evsels, not a perf.data file. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-mktq5fy2h5z7jyeqvvf5mbc8@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-evlist.c | 81 ++------------------------------------------- tools/perf/util/evsel.c | 77 ++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/evsel.h | 8 +++++ 3 files changed, 87 insertions(+), 79 deletions(-) diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index c20f1dcfb7e2..1312a5e03ec7 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c @@ -15,39 +15,6 @@ #include "util/parse-options.h" #include "util/session.h" -struct perf_attr_details { - bool freq; - bool verbose; -}; - -static int comma_printf(bool *first, const char *fmt, ...) 
-{ - va_list args; - int ret = 0; - - if (!*first) { - ret += printf(","); - } else { - ret += printf(":"); - *first = false; - } - - va_start(args, fmt); - ret += vprintf(fmt, args); - va_end(args); - return ret; -} - -static int __if_print(bool *first, const char *field, u64 value) -{ - if (value == 0) - return 0; - - return comma_printf(first, " %s: %" PRIu64, field, value); -} - -#define if_print(field) __if_print(&first, #field, pos->attr.field) - static int __cmd_evlist(const char *file_name, struct perf_attr_details *details) { struct perf_session *session; @@ -57,52 +24,8 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details if (session == NULL) return -ENOMEM; - list_for_each_entry(pos, &session->evlist->entries, node) { - bool first = true; - - printf("%s", perf_evsel__name(pos)); - - if (details->verbose || details->freq) { - comma_printf(&first, " sample_freq=%" PRIu64, - (u64)pos->attr.sample_freq); - } - - if (details->verbose) { - if_print(type); - if_print(config); - if_print(config1); - if_print(config2); - if_print(size); - if_print(sample_type); - if_print(read_format); - if_print(disabled); - if_print(inherit); - if_print(pinned); - if_print(exclusive); - if_print(exclude_user); - if_print(exclude_kernel); - if_print(exclude_hv); - if_print(exclude_idle); - if_print(mmap); - if_print(comm); - if_print(freq); - if_print(inherit_stat); - if_print(enable_on_exec); - if_print(task); - if_print(watermark); - if_print(precise_ip); - if_print(mmap_data); - if_print(sample_id_all); - if_print(exclude_host); - if_print(exclude_guest); - if_print(__reserved_1); - if_print(wakeup_events); - if_print(bp_type); - if_print(branch_sample_type); - } - - putchar('\n'); - } + list_for_each_entry(pos, &session->evlist->entries, node) + perf_evsel__fprintf(pos, details, stdout); perf_session__delete(session); return 0; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 2492d329a5f5..643df4b06b84 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1228,3 +1228,80 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, return 0; } + +static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) 
+{ + va_list args; + int ret = 0; + + if (!*first) { + ret += fprintf(fp, ","); + } else { + ret += fprintf(fp, ":"); + *first = false; + } + + va_start(args, fmt); + ret += vfprintf(fp, fmt, args); + va_end(args); + return ret; +} + +static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value) +{ + if (value == 0) + return 0; + + return comma_fprintf(fp, first, " %s: %" PRIu64, field, value); +} + +#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field) + +int perf_evsel__fprintf(struct perf_evsel *evsel, + struct perf_attr_details *details, FILE *fp) +{ + bool first = true; + int printed = fprintf(fp, "%s", perf_evsel__name(evsel)); + + if (details->verbose || details->freq) { + printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, + (u64)evsel->attr.sample_freq); + } + + if (details->verbose) { + if_print(type); + if_print(config); + if_print(config1); + if_print(config2); + if_print(size); + if_print(sample_type); + if_print(read_format); + if_print(disabled); + if_print(inherit); + if_print(pinned); + if_print(exclusive); + if_print(exclude_user); + if_print(exclude_kernel); + if_print(exclude_hv); + if_print(exclude_idle); + if_print(mmap); + if_print(comm); + if_print(freq); + if_print(inherit_stat); + if_print(enable_on_exec); + if_print(task); + if_print(watermark); + if_print(precise_ip); + if_print(mmap_data); + if_print(sample_id_all); + if_print(exclude_host); + if_print(exclude_guest); + if_print(__reserved_1); + if_print(wakeup_events); + if_print(bp_type); + if_print(branch_sample_type); + } + + fputc('\n', fp); + return ++printed; +} diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 5c089775064f..9cb8a0215711 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -243,4 +243,12 @@ static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel) { return evsel->leader == evsel; } + +struct perf_attr_details { + bool freq; + bool verbose; +}; + +int perf_evsel__fprintf(struct perf_evsel *evsel, + struct perf_attr_details *details, FILE *fp); #endif /* __PERF_EVSEL_H */ -- cgit v1.2.3-59-g8ed1b From 40c5ec7acb9278833cfbf9dd15c6c02d57cbaf77 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Dec 2012 09:54:21 -0300 Subject: perf tools: Add install-bin Makefile target It's too annoying to go over the Documentation install target while developing the tools.
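With this target a typical edit/build/test cycle can skip the documentation step entirely, e.g. (assuming the usual in-tree build):

	$ make -C tools/perf install-bin	# binary, scripts and tests only
	$ make -C tools/perf install		# install-bin plus try-install-man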
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-cfzcxj8sp727h0sgfcvvwva1@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 10aa24c74a28..317766ec070b 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -1102,7 +1102,7 @@ perfexec_instdir = $(prefix)/$(perfexecdir) endif perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) -install: all try-install-man +install-bin: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' @@ -1123,6 +1123,8 @@ install: all try-install-man $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' +install: install-bin try-install-man + install-python_ext: $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' -- cgit v1.2.3-59-g8ed1b From c79a439338f3a021f5a5ff6ea165b0d19d2eb0aa Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Dec 2012 10:54:12 -0300 Subject: perf evsel: Decode read_format and sample_type in perf_evsel__fprintf Before those fields showed just a number, now it decodes each bit: [root@sandy linux]# perf evlist -v cycles: sample_freq=4000, size: 96, sample_type: IP|TID|TIME|CPU|PERIOD, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING|ID, disabled: 1, inherit: 1, mmap: 1, comm: 1, freq: 1, sample_id_all: 1, exclude_guest: 1 Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-64ezdtiijolgti08ae3phxyj@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 643df4b06b84..7a2a3dc3ff03 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1257,6 +1257,53 @@ static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value) #define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field) +struct bit_names { + int bit; + const char *name; +}; + +static int bits__fprintf(FILE *fp, const char *field, u64 value, + struct bit_names *bits, bool *first) +{ + int i = 0, printed = comma_fprintf(fp, first, " %s: ", field); + bool first_bit = true; + + do { + if (value & bits[i].bit) { + printed += fprintf(fp, "%s%s", first_bit ? 
"" : "|", bits[i].name); + first_bit = false; + } + } while (bits[++i].name != NULL); + + return printed; +} + +static int sample_type__fprintf(FILE *fp, bool *first, u64 value) +{ +#define bit_name(n) { PERF_SAMPLE_##n, #n } + struct bit_names bits[] = { + bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), + bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), + bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), + bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), + { .name = NULL, } + }; +#undef bit_name + return bits__fprintf(fp, "sample_type", value, bits, first); +} + +static int read_format__fprintf(FILE *fp, bool *first, u64 value) +{ +#define bit_name(n) { PERF_FORMAT_##n, #n } + struct bit_names bits[] = { + bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), + bit_name(ID), bit_name(GROUP), + { .name = NULL, } + }; +#undef bit_name + return bits__fprintf(fp, "read_format", value, bits, first); +} + int perf_evsel__fprintf(struct perf_evsel *evsel, struct perf_attr_details *details, FILE *fp) { @@ -1274,8 +1321,9 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, if_print(config1); if_print(config2); if_print(size); - if_print(sample_type); - if_print(read_format); + printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type); + if (evsel->attr.read_format) + printed += read_format__fprintf(fp, &first, evsel->attr.read_format); if_print(disabled); if_print(inherit); if_print(pinned); -- cgit v1.2.3-59-g8ed1b From c5ff78c3092d0e7d14c82d2949e16fee063a83f1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Dec 2012 16:16:47 -0300 Subject: perf record: Pass perf_record_opts to the callchain cmdline parsing callback Its all it uses and makes the parsing callback suitable for use by 'perf top', which will happen in a followup patch. 
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-wb9eti78bk2jd7wpasro8hsz@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index fc4f08044632..d035040efe1d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -868,11 +868,9 @@ static int get_stack_size(char *str, unsigned long *_size) } #endif /* LIBUNWIND_SUPPORT */ -static int -parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, - int unset) +static int parse_callchain_opt(const struct option *opt, const char *arg, int unset) { - struct perf_record *rec = (struct perf_record *)opt->value; + struct perf_record_opts *opts = opt->value; char *tok, *name, *saveptr = NULL; char *buf; int ret = -1; @@ -898,7 +896,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, /* Framepointer style */ if (!strncmp(name, "fp", sizeof("fp"))) { if (!strtok_r(NULL, ",", &saveptr)) { - rec->opts.call_graph = CALLCHAIN_FP; + opts->call_graph = CALLCHAIN_FP; ret = 0; } else pr_err("callchain: No more arguments " @@ -911,20 +909,20 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, const unsigned long default_stack_dump_size = 8192; ret = 0; - rec->opts.call_graph = CALLCHAIN_DWARF; - rec->opts.stack_dump_size = default_stack_dump_size; + opts->call_graph = CALLCHAIN_DWARF; + opts->stack_dump_size = default_stack_dump_size; tok = strtok_r(NULL, ",", &saveptr); if (tok) { unsigned long size = 0; ret = get_stack_size(tok, &size); - rec->opts.stack_dump_size = size; + opts->stack_dump_size = size; } if (!ret) pr_debug("callchain: stack dump size %d\n", - rec->opts.stack_dump_size); + opts->stack_dump_size); #endif /* LIBUNWIND_SUPPORT */ } else { pr_err("callchain: Unknown -g option " @@ -937,7 +935,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, free(buf); if (!ret) - pr_debug("callchain: type %d\n", rec->opts.call_graph); + pr_debug("callchain: type %d\n", opts->call_graph); return ret; } @@ -1021,9 +1019,9 @@ const struct option record_options[] = { "number of mmap data pages"), OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), - OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]", - callchain_help, &parse_callchain_opt, - "fp"), + OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts, + "mode[,dump_size]", callchain_help, + &parse_callchain_opt, "fp"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), -- cgit v1.2.3-59-g8ed1b From 75d9a10854db6aab2400cd6a844c392107be4c64 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Dec 2012 16:46:05 -0300 Subject: perf record: Export the callchain parsing routine and help Will be used by perf top, that will first setup the symbol system to deal with callchains and then call these routines to ask the kernel for callchains. 
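A later user would then need roughly this (hypothetical caller: only the record_parse_callchain_opt and record_callchain_help names come from the callchain.h hunk below, the rest is illustration):

	#include "util/callchain.h"

	OPT_CALLBACK_DEFAULT('g', "call-graph", &top.record_opts,
			     "mode[,dump_size]", record_callchain_help,
			     &record_parse_callchain_opt, "fp"),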
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-jg0dh8rmlx7x11e7u7mnasvd@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 11 ++++++----- tools/perf/util/callchain.h | 5 +++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index d035040efe1d..028de726b832 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -868,7 +868,8 @@ static int get_stack_size(char *str, unsigned long *_size) } #endif /* LIBUNWIND_SUPPORT */ -static int parse_callchain_opt(const struct option *opt, const char *arg, int unset) +int record_parse_callchain_opt(const struct option *opt, + const char *arg, int unset) { struct perf_record_opts *opts = opt->value; char *tok, *name, *saveptr = NULL; @@ -973,9 +974,9 @@ static struct perf_record record = { #define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " #ifdef LIBUNWIND_SUPPORT -static const char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; +const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; #else -static const char callchain_help[] = CALLCHAIN_HELP "[fp]"; +const char record_callchain_help[] = CALLCHAIN_HELP "[fp]"; #endif /* @@ -1020,8 +1021,8 @@ const struct option record_options[] = { OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts, - "mode[,dump_size]", callchain_help, - &parse_callchain_opt, "fp"), + "mode[,dump_size]", record_callchain_help, + &record_parse_callchain_opt, "fp"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index eb340571e7d6..3ee9f67d5af0 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -143,4 +143,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor) cursor->curr = cursor->curr->next; cursor->pos++; } + +struct option; + +int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset); +extern const char record_callchain_help[]; #endif /* __PERF_CALLCHAIN_H */ -- cgit v1.2.3-59-g8ed1b From 2376c67a7bbc7849b806688ba2efb8520c21c458 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Dec 2012 16:48:41 -0300 Subject: perf top: Use perf_evlist__config() Using struct perf_record_opts to specify how to configure the evsel perf_event_attrs. This gets top closer to record in the way it sets up evsels, with the aim of sharing more and more to the point that both will be a single utility. In this direction top now uses the same callchain option parsing as record and that brings DWARF callchains to top, something that was already available for record. 
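So, assuming a libunwind-enabled build (LIBUNWIND_SUPPORT), asking for DWARF callchains should now work from top just as it does from record, e.g.:

	# perf top -g dwarf,8192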
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-u03o0bsrqcjgskciso3pvsjr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-top.txt | 2 +- tools/perf/builtin-top.c | 204 +++++++++++----------------------- tools/perf/util/top.c | 22 ++-- tools/perf/util/top.h | 8 +- 4 files changed, 79 insertions(+), 157 deletions(-) diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 5b80d84d6b4a..a414bc95fd52 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -60,7 +60,7 @@ Default is to monitor all CPUS. -i:: --inherit:: - Child tasks inherit counters, only makes sens with -p option. + Child tasks do not inherit counters. -k :: --vmlinux=:: diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a30647487ba3..b7d2ea62dbc6 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -596,7 +596,7 @@ static void *display_thread_tui(void *arg) * via --uid. */ list_for_each_entry(pos, &top->evlist->entries, node) - pos->hists.uid_filter_str = top->target.uid_str; + pos->hists.uid_filter_str = top->record_opts.target.uid_str; perf_evlist__tui_browse_hists(top->evlist, help, &hbt, &top->session->header.env); @@ -894,34 +894,13 @@ static void perf_top__start_counters(struct perf_top *top) { struct perf_evsel *counter; struct perf_evlist *evlist = top->evlist; + struct perf_record_opts *opts = &top->record_opts; - if (top->group) - perf_evlist__set_leader(evlist); + perf_evlist__config(evlist, opts); list_for_each_entry(counter, &evlist->entries, node) { struct perf_event_attr *attr = &counter->attr; - perf_evsel__set_sample_bit(counter, IP); - perf_evsel__set_sample_bit(counter, TID); - - if (top->freq) { - perf_evsel__set_sample_bit(counter, PERIOD); - attr->freq = 1; - attr->sample_freq = top->freq; - } - - if (evlist->nr_entries > 1) - perf_evsel__set_sample_id(counter); - - if (perf_target__has_cpu(&top->target)) - perf_evsel__set_sample_bit(counter, CPU); - - if (symbol_conf.use_callchain) - perf_evsel__set_sample_bit(counter, CALLCHAIN); - - attr->mmap = 1; - attr->comm = 1; - attr->inherit = top->inherit; fallback_missing_features: if (top->exclude_guest_missing) attr->exclude_guest = attr->exclude_host = 0; @@ -995,7 +974,7 @@ try_again: } } - if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) { + if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { ui__error("Failed to mmap with %d (%s)\n", errno, strerror(errno)); goto out_err; @@ -1015,7 +994,7 @@ static int perf_top__setup_sample_type(struct perf_top *top) ui__error("Selected -g but \"sym\" not present in --sort/-s."); return -EINVAL; } - } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) { + } else if (callchain_param.mode != CHAIN_NONE) { if (callchain_register_param(&callchain_param) < 0) { ui__error("Can't register callchain params.\n"); return -EINVAL; @@ -1027,6 +1006,7 @@ static int perf_top__setup_sample_type(struct perf_top *top) static int __cmd_top(struct perf_top *top) { + struct perf_record_opts *opts = &top->record_opts; pthread_t thread; int ret; /* @@ -1041,7 +1021,7 @@ static int __cmd_top(struct perf_top *top) if (ret) goto out_delete; - if (perf_target__has_task(&top->target)) + if (perf_target__has_task(&opts->target)) perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, 
perf_event__process, &top->session->host_machine); @@ -1052,6 +1032,17 @@ static int __cmd_top(struct perf_top *top) top->session->evlist = top->evlist; perf_session__set_id_hdr_size(top->session); + /* + * When perf is starting the traced process, all the events (apart from + * group members) have enable_on_exec=1 set, so don't spoil it by + * prematurely enabling them. + * + * XXX 'top' still doesn't start workloads like record, trace, but should, + * so leave the check here. + */ + if (!perf_target__none(&opts->target)) + perf_evlist__enable(top->evlist); + /* Wait for a minimal set of events before starting the snapshot */ poll(top->evlist->pollfd, top->evlist->nr_fds, 100); @@ -1092,116 +1083,56 @@ out_delete: static int parse_callchain_opt(const struct option *opt, const char *arg, int unset) { - struct perf_top *top = (struct perf_top *)opt->value; - char *tok, *tok2; - char *endptr; - /* * --no-call-graph */ - if (unset) { - top->dont_use_callchains = true; + if (unset) return 0; - } symbol_conf.use_callchain = true; - if (!arg) - return 0; - - tok = strtok((char *)arg, ","); - if (!tok) - return -1; - - /* get the output mode */ - if (!strncmp(tok, "graph", strlen(arg))) - callchain_param.mode = CHAIN_GRAPH_ABS; - - else if (!strncmp(tok, "flat", strlen(arg))) - callchain_param.mode = CHAIN_FLAT; - - else if (!strncmp(tok, "fractal", strlen(arg))) - callchain_param.mode = CHAIN_GRAPH_REL; - - else if (!strncmp(tok, "none", strlen(arg))) { - callchain_param.mode = CHAIN_NONE; - symbol_conf.use_callchain = false; - - return 0; - } else - return -1; - - /* get the min percentage */ - tok = strtok(NULL, ","); - if (!tok) - goto setup; - - callchain_param.min_percent = strtod(tok, &endptr); - if (tok == endptr) - return -1; - - /* get the print limit */ - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; - - if (tok2[0] != 'c') { - callchain_param.print_limit = strtod(tok2, &endptr); - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; - } - - /* get the call chain order */ - if (!strcmp(tok2, "caller")) - callchain_param.order = ORDER_CALLER; - else if (!strcmp(tok2, "callee")) - callchain_param.order = ORDER_CALLEE; - else - return -1; -setup: - if (callchain_register_param(&callchain_param) < 0) { - fprintf(stderr, "Can't register callchain params\n"); - return -1; - } - return 0; + return record_parse_callchain_opt(opt, arg, unset); } int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) { - struct perf_evsel *pos; int status; char errbuf[BUFSIZ]; struct perf_top top = { .count_filter = 5, .delay_secs = 2, - .freq = 4000, /* 4 KHz */ - .mmap_pages = 128, - .sym_pcnt_filter = 5, - .target = { - .uses_mmap = true, + .record_opts = { + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 4000, /* 4 KHz */ + .target = { + .uses_mmap = true, + }, }, + .sym_pcnt_filter = 5, }; - char callchain_default_opt[] = "fractal,0.5,callee"; + struct perf_record_opts *opts = &top.record_opts; + struct perf_target *target = &opts->target; const struct option options[] = { OPT_CALLBACK('e', "event", &top.evlist, "event", "event selector. 
use 'perf list' to list available events", parse_events_option), - OPT_INTEGER('c', "count", &top.default_interval, - "event period to sample"), - OPT_STRING('p', "pid", &top.target.pid, "pid", + OPT_U64('c', "count", &opts->user_interval, "event period to sample"), + OPT_STRING('p', "pid", &target->pid, "pid", "profile events on existing process id"), - OPT_STRING('t', "tid", &top.target.tid, "tid", + OPT_STRING('t', "tid", &target->tid, "tid", "profile events on existing thread id"), - OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide, + OPT_BOOLEAN('a', "all-cpus", &target->system_wide, "system-wide collection from all CPUs"), - OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu", + OPT_STRING('C', "cpu", &target->cpu_list, "cpu", "list of cpus to monitor"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, "hide kernel symbols"), - OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"), + OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages, + "number of mmap data pages"), OPT_INTEGER('r', "realtime", &top.realtime_prio, "collect data with this RT SCHED_FIFO priority"), OPT_INTEGER('d', "delay", &top.delay_secs, @@ -1210,16 +1141,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "dump the symbol table used for profiling"), OPT_INTEGER('f', "count-filter", &top.count_filter, "only display functions with more events than this"), - OPT_BOOLEAN('g', "group", &top.group, + OPT_BOOLEAN('g', "group", &opts->group, "put the counters into a counter group"), - OPT_BOOLEAN('i', "inherit", &top.inherit, - "child tasks inherit counters"), + OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit, + "child tasks do not inherit counters"), OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name", "symbol to annotate"), - OPT_BOOLEAN('z', "zero", &top.zero, - "zero history across updates"), - OPT_INTEGER('F', "freq", &top.freq, - "profile at this frequency"), + OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"), + OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"), OPT_INTEGER('E', "entries", &top.print_entries, "display this many functions"), OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, @@ -1232,10 +1161,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "sort by key(s): pid, comm, dso, symbol, parent"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order", - "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. " - "Default: fractal,0.5,callee", &parse_callchain_opt, - callchain_default_opt), + OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts, + "mode[,dump_size]", record_callchain_help, + &parse_callchain_opt, "fp"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", @@ -1250,7 +1178,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "Display raw encoding of assembly instructions (default)"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. 
-M intel for intel syntax)"), - OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"), + OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"), OPT_END() }; const char * const top_usage[] = { @@ -1280,27 +1208,27 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) setup_browser(false); - status = perf_target__validate(&top.target); + status = perf_target__validate(target); if (status) { - perf_target__strerror(&top.target, status, errbuf, BUFSIZ); + perf_target__strerror(target, status, errbuf, BUFSIZ); ui__warning("%s", errbuf); } - status = perf_target__parse_uid(&top.target); + status = perf_target__parse_uid(target); if (status) { int saved_errno = errno; - perf_target__strerror(&top.target, status, errbuf, BUFSIZ); + perf_target__strerror(target, status, errbuf, BUFSIZ); ui__error("%s", errbuf); status = -saved_errno; goto out_delete_evlist; } - if (perf_target__none(&top.target)) - top.target.system_wide = true; + if (perf_target__none(target)) + target->system_wide = true; - if (perf_evlist__create_maps(top.evlist, &top.target) < 0) + if (perf_evlist__create_maps(top.evlist, target) < 0) usage_with_options(top_usage, options); if (!top.evlist->nr_entries && @@ -1314,24 +1242,22 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) if (top.delay_secs < 1) top.delay_secs = 1; + if (opts->user_interval != ULLONG_MAX) + opts->default_interval = opts->user_interval; + if (opts->user_freq != UINT_MAX) + opts->freq = opts->user_freq; + /* * User specified count overrides default frequency. */ - if (top.default_interval) - top.freq = 0; - else if (top.freq) { - top.default_interval = top.freq; + if (opts->default_interval) + opts->freq = 0; + else if (opts->freq) { + opts->default_interval = opts->freq; } else { ui__error("frequency and count are zero, aborting\n"); - exit(EXIT_FAILURE); - } - - list_for_each_entry(pos, &top.evlist->entries, node) { - /* - * Fill in the ones not specifically initialized via -c: - */ - if (!pos->attr.sample_period) - pos->attr.sample_period = top.default_interval; + status = -EINVAL; + goto out_delete_evlist; } top.sym_evsel = perf_evlist__first(top.evlist); diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index 884dde9b9bc1..54d37a4753c5 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c @@ -26,6 +26,8 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) float samples_per_sec = top->samples / top->delay_secs; float ksamples_per_sec = top->kernel_samples / top->delay_secs; float esamples_percent = (100.0 * top->exact_samples) / top->samples; + struct perf_record_opts *opts = &top->record_opts; + struct perf_target *target = &opts->target; size_t ret = 0; if (!perf_guest) { @@ -61,31 +63,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) struct perf_evsel *first = perf_evlist__first(top->evlist); ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", (uint64_t)first->attr.sample_period, - top->freq ? "Hz" : ""); + opts->freq ? 
"Hz" : ""); } ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); ret += SNPRINTF(bf + ret, size - ret, "], "); - if (top->target.pid) + if (target->pid) ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", - top->target.pid); - else if (top->target.tid) + target->pid); + else if (target->tid) ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", - top->target.tid); - else if (top->target.uid_str != NULL) + target->tid); + else if (target->uid_str != NULL) ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", - top->target.uid_str); + target->uid_str); else ret += SNPRINTF(bf + ret, size - ret, " (all"); - if (top->target.cpu_list) + if (target->cpu_list) ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", top->evlist->cpus->nr > 1 ? "s" : "", - top->target.cpu_list); + target->cpu_list); else { - if (top->target.tid) + if (target->tid) ret += SNPRINTF(bf + ret, size - ret, ")"); else ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 86ff1b15059b..927c229c2d9a 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -14,7 +14,7 @@ struct perf_session; struct perf_top { struct perf_tool tool; struct perf_evlist *evlist; - struct perf_target target; + struct perf_record_opts record_opts; /* * Symbols will be added here in perf_event__process_sample and will * get out after decayed. @@ -24,15 +24,11 @@ struct perf_top { u64 exact_samples; u64 guest_us_samples, guest_kernel_samples; int print_entries, count_filter, delay_secs; - int freq; bool hide_kernel_symbols, hide_user_symbols, zero; bool use_tui, use_stdio; bool sort_has_symbols; - bool dont_use_callchains; bool kptr_restrict_warned; bool vmlinux_warned; - bool inherit; - bool group; bool sample_id_all_missing; bool exclude_guest_missing; bool dump_symtab; @@ -40,8 +36,6 @@ struct perf_top { struct perf_evsel *sym_evsel; struct perf_session *session; struct winsize winsize; - unsigned int mmap_pages; - int default_interval; int realtime_prio; int sym_pcnt_filter; const char *sym_filter; -- cgit v1.2.3-59-g8ed1b From 1ca1d8d54f925ad0eb6d9806ecd4309738f25301 Mon Sep 17 00:00:00 2001 From: Lance Ortiz Date: Thu, 3 Jan 2013 15:34:01 -0700 Subject: aerdrv: Trace Event for PCI Express Advanced Error Reporting This header file will define a new trace event that will be triggered when a AER event occurs. The following data will be provided to the trace event. char * dev_name - The name of the slot where the device resides ([domain:]bus:device.function). u32 status - Either the correctable or uncorrectable register indicating what error or errors have been see. 
u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED The trace event will also provide a trace string that may look like: "0000:05:00.0 PCIe Bus Error:severity=Uncorrected (Non-Fatal), Poisoned TLP" Signed-off-by: Lance Ortiz Acked-by: Mauro Carvalho Chehab Acked-by: Boris Petkov Signed-off-by: Tony Luck --- include/trace/events/ras.h | 77 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 include/trace/events/ras.h diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h new file mode 100644 index 000000000000..88b878383797 --- /dev/null +++ b/include/trace/events/ras.h @@ -0,0 +1,77 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ras + +#if !defined(_TRACE_AER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_AER_H + +#include +#include + + +/* + * PCIe AER Trace event + * + * These events are generated when hardware detects a corrected or + * uncorrected event on a PCIe device. The event report has + * the following structure: + * + * char * dev_name - The name of the slot where the device resides + * ([domain:]bus:device.function). + * u32 status - Either the correctable or uncorrectable register + * indicating what error or errors have been seen + * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED + */ + +#define aer_correctable_errors \ + {BIT(0), "Receiver Error"}, \ + {BIT(6), "Bad TLP"}, \ + {BIT(7), "Bad DLLP"}, \ + {BIT(8), "RELAY_NUM Rollover"}, \ + {BIT(12), "Replay Timer Timeout"}, \ + {BIT(13), "Advisory Non-Fatal"} + +#define aer_uncorrectable_errors \ + {BIT(4), "Data Link Protocol"}, \ + {BIT(12), "Poisoned TLP"}, \ + {BIT(13), "Flow Control Protocol"}, \ + {BIT(14), "Completion Timeout"}, \ + {BIT(15), "Completer Abort"}, \ + {BIT(16), "Unexpected Completion"}, \ + {BIT(17), "Receiver Overflow"}, \ + {BIT(18), "Malformed TLP"}, \ + {BIT(19), "ECRC"}, \ + {BIT(20), "Unsupported Request"} + +TRACE_EVENT(aer_event, + TP_PROTO(const char *dev_name, + const u32 status, + const u8 severity), + + TP_ARGS(dev_name, status, severity), + + TP_STRUCT__entry( + __string( dev_name, dev_name ) + __field( u32, status ) + __field( u8, severity ) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __entry->status = status; + __entry->severity = severity; + ), + + TP_printk("%s PCIe Bus Error: severity=%s, %s\n", + __get_str(dev_name), + __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" : + __entry->severity == HW_EVENT_ERR_FATAL ? + "Fatal" : "Uncorrected", + __entry->severity == HW_EVENT_ERR_CORRECTED ? + __print_flags(__entry->status, "|", aer_correctable_errors) : + __print_flags(__entry->status, "|", aer_uncorrectable_errors)) +); + +#endif /* _TRACE_AER_H */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3-59-g8ed1b From 1d5210008bd3a26daf4b06aed9d6c330dd4c83e2 Mon Sep 17 00:00:00 2001 From: Lance Ortiz Date: Thu, 3 Jan 2013 15:34:08 -0700 Subject: aerdrv: Enhanced AER logging This patch will provide a more reliable and easy way for user-space applications to have access to AER logs rather than reading them from the message buffer. It also provides a way to notify user-space when an AER event occurs. The aer driver is updated to generate a trace event of function 'aer_event' when a PCIe error is reported over the AER interface. The trace event was added to both the interrupt based aer path and the firmware first path. 
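For example, once this patch is applied, a user-space consumer can enable the new event and stream the records through the standard tracing interface (assuming debugfs is mounted at /sys/kernel/debug):

	# cd /sys/kernel/debug/tracing
	# echo 1 > events/ras/aer_event/enable
	# cat trace_pipe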
Signed-off-by: Lance Ortiz Acked-by: Mauro Carvalho Chehab Acked-by: Boris Petkov Signed-off-by: Tony Luck --- drivers/acpi/apei/cper.c | 19 ++++++++++++++++--- drivers/pci/pcie/aer/aerdrv_errprint.c | 9 ++++++++- include/linux/aer.h | 4 ++-- 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c index e6defd86b424..1e5d8a40101e 100644 --- a/drivers/acpi/apei/cper.c +++ b/drivers/acpi/apei/cper.c @@ -29,6 +29,7 @@ #include #include #include +#include #include /* @@ -249,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = { static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, const struct acpi_hest_generic_data *gdata) { +#ifdef CONFIG_ACPI_APEI_PCIEAER + struct pci_dev *dev; +#endif + if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? @@ -281,10 +286,18 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); #ifdef CONFIG_ACPI_APEI_PCIEAER - if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) { - struct aer_capability_regs *aer_regs = (void *)pcie->aer_info; - cper_print_aer(pfx, gdata->error_severity, aer_regs); + dev = pci_get_domain_bus_and_slot(pcie->device_id.segment, + pcie->device_id.bus, pcie->device_id.function); + if (!dev) { + pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n", + pcie->device_id.segment, pcie->device_id.bus, + pcie->device_id.slot, pcie->device_id.function); + return; } + if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) + cper_print_aer(pfx, dev, gdata->error_severity, + (struct aer_capability_regs *) pcie->aer_info); + pci_dev_put(dev); #endif } diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 3ea51736f18d..d3e5fc5a2de9 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -23,6 +23,9 @@ #include "aerdrv.h" +#define CREATE_TRACE_POINTS +#include + #define AER_AGENT_RECEIVER 0 #define AER_AGENT_REQUESTER 1 #define AER_AGENT_COMPLETER 2 @@ -194,6 +197,8 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) if (info->id && info->error_dev_num > 1 && info->id == id) printk("%s"" Error of this Agent(%04x) is reported first\n", prefix, id); + trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), + info->severity); } void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) @@ -217,7 +222,7 @@ int cper_severity_to_aer(int cper_severity) } EXPORT_SYMBOL_GPL(cper_severity_to_aer); -void cper_print_aer(const char *prefix, int cper_severity, +void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, struct aer_capability_regs *aer) { int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0; @@ -259,5 +264,7 @@ void cper_print_aer(const char *prefix, int cper_severity, *(tlp + 8), *(tlp + 15), *(tlp + 14), *(tlp + 13), *(tlp + 12)); } + trace_aer_event(dev_name(&dev->dev), (status & ~mask), + aer_severity); } #endif diff --git a/include/linux/aer.h b/include/linux/aer.h index 544abdb2238c..ec10e1b24c1c 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -49,8 +49,8 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) } #endif -extern void cper_print_aer(const char *prefix, int cper_severity, - 
struct aer_capability_regs *aer); +extern void cper_print_aer(const char *prefix, struct pci_dev *dev, + int cper_severity, struct aer_capability_regs *aer); extern int cper_severity_to_aer(int cper_severity); extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, int severity); -- cgit v1.2.3-59-g8ed1b From 2cced2d95961acd318e9395578a60ee424d9db80 Mon Sep 17 00:00:00 2001 From: Lance Ortiz Date: Thu, 3 Jan 2013 15:34:15 -0700 Subject: aerdrv: Cleanup log output for AER These changes make cper_print_aer more consistent with aer_print_error and clean things up by eliminating the use of the prefix variable and replacing it with dev_printk. Signed-off-by: Lance Ortiz Acked-by: Boris Petkov Signed-off-by: Tony Luck --- drivers/pci/pcie/aer/aerdrv_errprint.c | 54 ++++++++++++++++------------ 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index d3e5fc5a2de9..5ab14251839d 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -124,12 +124,11 @@ static const char *aer_agent_string[] = { "Transmitter ID" }; -static void __aer_print_error(const char *prefix, +static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { int i, status; const char *errmsg = NULL; - status = (info->status & ~info->mask); for (i = 0; i < 32; i++) { @@ -144,26 +143,22 @@ static void __aer_print_error(const char *prefix, aer_uncorrectable_error_string[i] : NULL; if (errmsg) - printk("%s"" [%2d] %-22s%s\n", prefix, i, errmsg, + dev_err(&dev->dev, " [%2d] %-22s%s\n", i, errmsg, info->first_error == i ? " (First)" : ""); else - printk("%s"" [%2d] Unknown Error Bit%s\n", prefix, i, - info->first_error == i ? " (First)" : ""); + dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n", + i, info->first_error == i ? " (First)" : ""); } } void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { int id = ((dev->bus->number << 8) | dev->devfn); - char prefix[44]; - - snprintf(prefix, sizeof(prefix), "%s%s %s: ", - (info->severity == AER_CORRECTABLE) ?
KERN_WARNING : KERN_ERR, - dev_driver_string(&dev->dev), dev_name(&dev->dev)); if (info->status == 0) { - printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, " - "id=%04x(Unregistered Agent ID)\n", prefix, + dev_err(&dev->dev, + "PCIe Bus Error: severity=%s, type=Unaccessible, " + "id=%04x(Unregistered Agent ID)\n", aer_error_severity_string[info->severity], id); } else { int layer, agent; @@ -171,22 +166,24 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) layer = AER_GET_LAYER_ERROR(info->severity, info->status); agent = AER_GET_AGENT(info->severity, info->status); - printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", - prefix, aer_error_severity_string[info->severity], + dev_err(&dev->dev, + "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", + aer_error_severity_string[info->severity], aer_error_layer[layer], id, aer_agent_string[agent]); - printk("%s"" device [%04x:%04x] error status/mask=%08x/%08x\n", - prefix, dev->vendor, dev->device, + dev_err(&dev->dev, + " device [%04x:%04x] error status/mask=%08x/%08x\n", + dev->vendor, dev->device, info->status, info->mask); - __aer_print_error(prefix, info); + __aer_print_error(dev, info); if (info->tlp_header_valid) { unsigned char *tlp = (unsigned char *) &info->tlp; - printk("%s"" TLP Header:" + dev_err(&dev->dev, " TLP Header:" " %02x%02x%02x%02x %02x%02x%02x%02x" " %02x%02x%02x%02x %02x%02x%02x%02x\n", - prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, + *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), *(tlp + 11), *(tlp + 10), *(tlp + 9), *(tlp + 8), *(tlp + 15), *(tlp + 14), @@ -195,8 +192,9 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) } if (info->id && info->error_dev_num > 1 && info->id == id) - printk("%s"" Error of this Agent(%04x) is reported first\n", - prefix, id); + dev_err(&dev->dev, + " Error of this Agent(%04x) is reported first\n", + id); trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), info->severity); } @@ -244,21 +242,21 @@ void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity, } layer = AER_GET_LAYER_ERROR(aer_severity, status); agent = AER_GET_AGENT(aer_severity, status); - printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n", - prefix, status, mask); + dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", + status, mask); cper_print_bits(prefix, status, status_strs, status_strs_size); - printk("%s""aer_layer=%s, aer_agent=%s\n", prefix, + dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", aer_error_layer[layer], aer_agent_string[agent]); if (aer_severity != AER_CORRECTABLE) - printk("%s""aer_uncor_severity: 0x%08x\n", - prefix, aer->uncor_severity); + dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", + aer->uncor_severity); if (tlp_header_valid) { const unsigned char *tlp; tlp = (const unsigned char *)&aer->header_log; - printk("%s""aer_tlp_header:" + dev_err(&dev->dev, "aer_tlp_header:" " %02x%02x%02x%02x %02x%02x%02x%02x" " %02x%02x%02x%02x %02x%02x%02x%02x\n", - prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, + *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), *(tlp + 11), *(tlp + 10), *(tlp + 9), *(tlp + 8), *(tlp + 15), *(tlp + 14), -- cgit v1.2.3-59-g8ed1b From 418c59e49ddc77fcb7054f2c8d52c9d47403b43e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 14 Dec 2012 13:15:08 -0500 Subject: tracing: Fix sparse warning with is_signed_type() macro Sparse complains when is_signed_type() is used on a pointer. 
This macro is needed for the format output used for ftrace and perf, to know if a binary field is a signed type or not. The is_signed_type() macro is used against all fields that are recorded by events to automate the operation. The problem sparse has is with the current way is_signed_type() works: ((type)-1 < 0) If "type" is a pointer, then sparse does not like it being compared to an integer (zero). The simple fix is to just give zero the same type. The runtime result stays the same. Reported-by: Robert Jarzmik Signed-off-by: Steven Rostedt --- include/linux/ftrace_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index a3d489531d83..43ef8b6cce7f 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, extern int trace_add_event_call(struct ftrace_event_call *call); extern void trace_remove_event_call(struct ftrace_event_call *call); -#define is_signed_type(type) (((type)(-1)) < 0) +#define is_signed_type(type) (((type)(-1)) < (type)0) int trace_set_clr_event(const char *system, const char *event, int set); -- cgit v1.2.3-59-g8ed1b From 771e03842a9e98a1c2013ca1ed8bb2793488f3e5 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 30 Nov 2012 10:41:57 -0500 Subject: ring-buffer: Remove unnecessary recursive call in rb_advance_iter() The original ring-buffer code had special checks at the start of rb_advance_iter() and instead of repeating them at the end of the function if a certain condition existed, I just did a recursive call to rb_advance_iter() because the special condition would cause rb_advance_iter() to return early (after the checks). But as things have changed, the special checks no longer exist and the only thing done for the special condition is to call rb_inc_iter() and return. Instead of doing a confusing recursive call, just call rb_inc_iter() directly. Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index ce8514feedcd..6ff9cc4658ed 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3425,7 +3425,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) /* check for end of page padding */ if ((iter->head >= rb_page_size(iter->head_page)) && (iter->head_page != cpu_buffer->commit_page)) - rb_advance_iter(iter); + rb_inc_iter(iter); } static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) -- cgit v1.2.3-59-g8ed1b From d8a0349c0cea477322c66ea9362f10c62fad5f62 Mon Sep 17 00:00:00 2001 From: Shan Wei Date: Tue, 13 Nov 2012 09:53:04 +0800 Subject: tracing: Use this_cpu_ptr per-cpu helper typeof(&buffer) is a pointer to array of 1024 char, or char (*)[1024]. But, typeof(&buffer[0]) is a pointer to char which matches the return type of get_trace_buf(). As is well known, the value of &buffer is equal to &buffer[0], so returning this_cpu_ptr(&percpu_buffer->buffer[0]) avoids the type cast.
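The type relationship can be checked with a small stand-alone program (a user-space sketch mirroring the 1024-char buffer described above, not the kernel code itself):

	#include <stdio.h>

	struct trace_buffer_struct { char buffer[1024]; };

	int main(void)
	{
		struct trace_buffer_struct b;
		char (*pa)[1024] = &b.buffer;	/* typeof(&buffer): pointer to array */
		char *pc = &b.buffer[0];	/* typeof(&buffer[0]): pointer to char */

		/* Same address, different types; only pc matches char *. */
		printf("%p %p\n", (void *)pa, (void *)pc);
		return 0;
	}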
Link: http://lkml.kernel.org/r/50A1A800.3020102@gmail.com Reviewed-by: Christoph Lameter Signed-off-by: Shan Wei Signed-off-by: Steven Rostedt --- kernel/trace/blktrace.c | 2 +- kernel/trace/trace.c | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index c0bd0308741c..71259e2b6b61 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -147,7 +147,7 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) return; local_irq_save(flags); - buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); + buf = this_cpu_ptr(bt->msg_data); va_start(args, fmt); n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); va_end(args); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 3c13e46d7d24..f8b7c626f3fd 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1517,7 +1517,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer; static char *get_trace_buf(void) { struct trace_buffer_struct *percpu_buffer; - struct trace_buffer_struct *buffer; /* * If we have allocated per cpu buffers, then we do not @@ -1535,9 +1534,7 @@ static char *get_trace_buf(void) if (!percpu_buffer) return NULL; - buffer = per_cpu_ptr(percpu_buffer, smp_processor_id()); - - return buffer->buffer; + return this_cpu_ptr(&percpu_buffer->buffer[0]); } static int alloc_percpu_trace_buffer(void) -- cgit v1.2.3-59-g8ed1b From d24d7dbf3cc49b00a152e55e24f0eeb173c7a971 Mon Sep 17 00:00:00 2001 From: Jovi Zhang Date: Wed, 18 Jul 2012 18:16:44 +0800 Subject: tracing: Verify target file before registering a uprobe event Without this patch, we can register a uprobe event for a directory. Enabling such a uprobe event would anyway fail. Example: $ echo 'p /bin:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events However, directories cannot be valid targets for uprobes. Hence verify that the target is a regular file during probe registration. Link: http://lkml.kernel.org/r/20130103004212.690763002@goodmis.org Cc: Namhyung Kim Signed-off-by: Jovi Zhang Acked-by: Srikar Dronamraju [ cleaned up whitespace and removed redundant IS_DIR() check ] Signed-off-by: Steven Rostedt --- kernel/trace/trace_uprobe.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c86e6d4f67fb..87b6db4ccbc5 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -258,6 +258,10 @@ static int create_trace_uprobe(int argc, char **argv) goto fail_address_parse; inode = igrab(path.dentry->d_inode); + if (!S_ISREG(inode->i_mode)) { + ret = -EINVAL; + goto fail_address_parse; + } argc -= 2; argv += 2; @@ -356,7 +360,7 @@ fail_address_parse: if (inode) iput(inode); - pr_info("Failed to parse address.\n"); + pr_info("Failed to parse address or file.\n"); return ret; } -- cgit v1.2.3-59-g8ed1b From 6aea49cb5f3001a8275bf9c9f586ec3eb39af194 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Wed, 21 Nov 2012 15:13:47 +0800 Subject: tracing/syscalls: Make local functions static Some functions in the syscall tracing code are used only locally in that file, but they are labeled global. Convert them to static functions.
Signed-off-by: Fengguang Wu Signed-off-by: Steven Rostedt --- kernel/trace/trace_syscalls.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 7609dd6714c2..5329e13e74a1 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -77,7 +77,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -enum print_line_t +static enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags, struct trace_event *event) { @@ -130,7 +130,7 @@ end: return TRACE_TYPE_HANDLED; } -enum print_line_t +static enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags, struct trace_event *event) { @@ -270,7 +270,7 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call) return ret; } -void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) +static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_trace_enter *entry; struct syscall_metadata *sys_data; @@ -305,7 +305,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) trace_current_buffer_unlock_commit(buffer, event, 0, 0); } -void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) +static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) { struct syscall_trace_exit *entry; struct syscall_metadata *sys_data; @@ -337,7 +337,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) trace_current_buffer_unlock_commit(buffer, event, 0, 0); } -int reg_event_syscall_enter(struct ftrace_event_call *call) +static int reg_event_syscall_enter(struct ftrace_event_call *call) { int ret = 0; int num; @@ -356,7 +356,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) return ret; } -void unreg_event_syscall_enter(struct ftrace_event_call *call) +static void unreg_event_syscall_enter(struct ftrace_event_call *call) { int num; @@ -371,7 +371,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call) mutex_unlock(&syscall_trace_lock); } -int reg_event_syscall_exit(struct ftrace_event_call *call) +static int reg_event_syscall_exit(struct ftrace_event_call *call) { int ret = 0; int num; @@ -390,7 +390,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) return ret; } -void unreg_event_syscall_exit(struct ftrace_event_call *call) +static void unreg_event_syscall_exit(struct ftrace_event_call *call) { int num; @@ -459,7 +459,7 @@ unsigned long __init __weak arch_syscall_addr(int nr) return (unsigned long)sys_call_table[nr]; } -int __init init_ftrace_syscalls(void) +static int __init init_ftrace_syscalls(void) { struct syscall_metadata *meta; unsigned long addr; -- cgit v1.2.3-59-g8ed1b From a54164114b96b4693b42cdb553260eec41ea4393 Mon Sep 17 00:00:00 2001 From: Hiraku Toyooka Date: Wed, 19 Dec 2012 16:02:34 +0900 Subject: tracing: Add checks if tr->buffer is NULL in tracing_reset{_online_cpus} max_tr->buffer could be NULL in the tracing_reset{_online_cpus}. In this case, a NULL pointer dereference happens, so we should return immediately from these functions. Note, the current code does not call tracing_reset*() with max_tr when its buffer is NULL, but future code will. This patch is needed to prevent the future code from crashing. 
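With the check inside the callees, callers no longer need to guard the max_tr case themselves:

	tracing_reset_online_cpus(&global_trace);
	tracing_reset_online_cpus(&max_tr);	/* safe even while max_tr.buffer is NULL */

which is exactly the simplification the next patch in this series makes in tracing_clock_write().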
Link: http://lkml.kernel.org/r/20121219070234.31200.93863.stgit@liselsia Signed-off-by: Hiraku Toyooka Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f8b7c626f3fd..72b171b90e55 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -922,6 +922,9 @@ void tracing_reset(struct trace_array *tr, int cpu) { struct ring_buffer *buffer = tr->buffer; + if (!buffer) + return; + ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ @@ -936,6 +939,9 @@ void tracing_reset_online_cpus(struct trace_array *tr) struct ring_buffer *buffer = tr->buffer; int cpu; + if (!buffer) + return; + ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ -- cgit v1.2.3-59-g8ed1b From 84c6cf0db6a00601eb43cfc08244a398ffb0894c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 20 Dec 2012 21:43:52 -0500 Subject: tracing: Remove unneeded check of max_tr->buffer before tracing_reset There's now a check in tracing_reset_online_cpus() if the buffer is allocated or NULL. No need to do a check before calling it with max_tr. Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 72b171b90e55..d62248dfda76 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4040,8 +4040,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, * Reset the buffer so that it doesn't have incomparable timestamps. */ tracing_reset_online_cpus(&global_trace); - if (max_tr.buffer) - tracing_reset_online_cpus(&max_tr); + tracing_reset_online_cpus(&max_tr); mutex_unlock(&trace_types_lock); -- cgit v1.2.3-59-g8ed1b From 0f1ac8fd254b6c3e77950a1c4ee67be5dc88f7e0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 15 Jan 2013 22:11:19 -0500 Subject: tracing/lockdep: Disable lockdep first in entering NMI When function tracing with either debug locks enabled or tracing preempt disabled, the add_preempt_count() is traced. This is an issue with lockdep and function tracing. As function tracing can disable interrupts, and lockdep records that change, lockdep may not be able to handle this recursion if it happens from an NMI context. The first thing that an NMI does is: #define nmi_enter() \ do { \ ftrace_nmi_enter(); \ BUG_ON(in_nmi()); \ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ lockdep_off(); \ rcu_nmi_enter(); \ trace_hardirq_enter(); \ } while (0) When the add_preempt_count() is traced, and the tracing callback disables interrupts, it will jump into the lockdep code. There's some places in lockdep that can't handle this re-entrance, and causes lockdep to fail. As the lockdep_off() (and lockdep_on) is a simple: void lockdep_off(void) { current->lockdep_recursion++; } and is never traced, it can be called first in nmi_enter() and lockdep_on() last in nmi_exit(). 
Cc: Peter Zijlstra Signed-off-by: Steven Rostedt --- include/linux/hardirq.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 624ef3f45c8e..57bfdce8fb90 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -180,10 +180,10 @@ extern void irq_exit(void); #define nmi_enter() \ do { \ + lockdep_off(); \ ftrace_nmi_enter(); \ BUG_ON(in_nmi()); \ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ - lockdep_off(); \ rcu_nmi_enter(); \ trace_hardirq_enter(); \ } while (0) @@ -192,10 +192,10 @@ extern void irq_exit(void); do { \ trace_hardirq_exit(); \ rcu_nmi_exit(); \ - lockdep_on(); \ BUG_ON(!in_nmi()); \ sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ ftrace_nmi_exit(); \ + lockdep_on(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ -- cgit v1.2.3-59-g8ed1b From 8741db532e86da2e54f05be751bfe1922ca63d57 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 16 Jan 2013 10:49:37 -0500 Subject: tracing/fgraph: Add max_graph_depth to limit function_graph depth Add the file max_graph_depth to the debug tracing directory that lets the user define the depth of the function graph. A very useful operation is to set the depth to 1. Then it traces only the first function that is called when entering the kernel. This can be used to determine what system operations interrupt a process. For example, to work on NOHZ processes (single tasks running without a timer tick), if any interrupt goes off and preempts that task, this code will show it happening. # cd /sys/kernel/debug/tracing # echo 1 > max_graph_depth # echo function_graph > current_tracer # cat per_cpu/cpu//trace Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 60 ++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 4edb4b74eb7e..7008d2e13cf2 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -47,6 +47,8 @@ struct fgraph_data { #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 #define TRACE_GRAPH_PRINT_IRQS 0x40 +static unsigned int max_depth; + static struct tracer_opt trace_opts[] = { /* Display overruns? (for self-debug purpose) */ { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, @@ -250,8 +252,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) return 0; /* trace it when it is-nested-in or is a function enabled. 
*/ - if (!(trace->depth || ftrace_graph_addr(trace->func)) || - ftrace_graph_ignore_irqs()) + if ((!(trace->depth || ftrace_graph_addr(trace->func)) || + ftrace_graph_ignore_irqs()) || + (max_depth && trace->depth >= max_depth)) return 0; local_irq_save(flags); @@ -1457,6 +1460,59 @@ static struct tracer graph_trace __read_mostly = { #endif }; + +static ssize_t +graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt, + loff_t *ppos) +{ + unsigned long val; + int ret; + + ret = kstrtoul_from_user(ubuf, cnt, 10, &val); + if (ret) + return ret; + + max_depth = val; + + *ppos += cnt; + + return cnt; +} + +static ssize_t +graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt, + loff_t *ppos) +{ + char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/ + int n; + + n = sprintf(buf, "%d\n", max_depth); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); +} + +static const struct file_operations graph_depth_fops = { + .open = tracing_open_generic, + .write = graph_depth_write, + .read = graph_depth_read, + .llseek = generic_file_llseek, +}; + +static __init int init_graph_debugfs(void) +{ + struct dentry *d_tracer; + + d_tracer = tracing_init_dentry(); + if (!d_tracer) + return 0; + + trace_create_file("max_graph_depth", 0644, d_tracer, + NULL, &graph_depth_fops); + + return 0; +} +fs_initcall(init_graph_debugfs); + static __init int init_graph_trace(void) { max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); -- cgit v1.2.3-59-g8ed1b From 06aeaaeabf69da4a3e86df532425640f51b01cef Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Sep 2012 17:15:17 +0900 Subject: ftrace: Move ARCH_SUPPORTS_FTRACE_SAVE_REGS in Kconfig Move SAVE_REGS support flag into Kconfig and rename it to CONFIG_DYNAMIC_FTRACE_WITH_REGS. This also introduces CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS which indicates the architecture depending part of ftrace has a code that saves full registers. On the other hand, CONFIG_DYNAMIC_FTRACE_WITH_REGS indicates the code is enabled. Link: http://lkml.kernel.org/r/20120928081516.3560.72534.stgit@ltc138.sdl.hitachi.co.jp Cc: Ingo Molnar Cc: Ananth N Mavinakayanahalli Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Frederic Weisbecker Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt --- arch/x86/Kconfig | 1 + arch/x86/include/asm/ftrace.h | 1 - include/linux/ftrace.h | 6 +++--- kernel/trace/Kconfig | 8 ++++++++ kernel/trace/ftrace.c | 6 +++--- kernel/trace/trace_selftest.c | 2 +- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 79795af59810..996ccecc694c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -44,6 +44,7 @@ config X86 select HAVE_FENTRY if X86_64 select HAVE_C_RECORDMCOUNT select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_FP_TEST diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 9a25b522d377..86cb51e1ca96 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -44,7 +44,6 @@ #ifdef CONFIG_DYNAMIC_FTRACE #define ARCH_SUPPORTS_FTRACE_OPS 1 -#define ARCH_SUPPORTS_FTRACE_SAVE_REGS #endif #ifndef __ASSEMBLY__ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 92691d85c320..e5ca8ef50e9b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -74,7 +74,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * SAVE_REGS - The ftrace_ops wants regs saved at each function called * and passed to the callback. If this flag is set, but the * architecture does not support passing regs - * (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the + * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the * ftrace_ops will fail to register, unless the next flag * is set. * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the @@ -418,7 +418,7 @@ void ftrace_modify_all_code(int command); #endif #ifndef FTRACE_REGS_ADDR -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) #else # define FTRACE_REGS_ADDR FTRACE_ADDR @@ -480,7 +480,7 @@ extern int ftrace_make_nop(struct module *mod, */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /** * ftrace_modify_call - convert from one addr to another (no nop) * @rec: the mcount call site record diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 5d89335a485f..cdc9d284d24e 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -39,6 +39,9 @@ config HAVE_DYNAMIC_FTRACE help See Documentation/trace/ftrace-design.txt +config HAVE_DYNAMIC_FTRACE_WITH_REGS + bool + config HAVE_FTRACE_MCOUNT_RECORD bool help @@ -434,6 +437,11 @@ config DYNAMIC_FTRACE were made. If so, it runs stop_machine (stops all CPUS) and modifies the code to jump over the call to ftrace. 
+config DYNAMIC_FTRACE_WITH_REGS + def_bool y + depends on DYNAMIC_FTRACE + depends on HAVE_DYNAMIC_FTRACE_WITH_REGS + config FUNCTION_PROFILER bool "Kernel function profiler" depends on FUNCTION_TRACER diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 41473b4ad7a4..6e34dc162fe1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -337,7 +337,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops) if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) return -EINVAL; -#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS +#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS /* * If the ftrace_ops specifies SAVE_REGS, then it only can be used * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. @@ -4143,8 +4143,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, * Archs are to support both the regs and ftrace_ops at the same time. * If they support ftrace_ops, it is assumed they support regs. * If call backs want to use regs, they must either check for regs - * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS. - * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved. + * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. + * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. * An architecture can pass partial regs with ftrace_ops and still * set the ARCH_SUPPORT_FTARCE_OPS. */ diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 47623169a815..6c62d58d8e87 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -568,7 +568,7 @@ trace_selftest_function_regs(void) int ret; int supported = 0; -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS supported = 1; #endif -- cgit v1.2.3-59-g8ed1b From e7dbfe349d12eabb7783b117e0c115f6f3d9ef9e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Sep 2012 17:15:20 +0900 Subject: kprobes/x86: Move ftrace-based kprobe code into kprobes-ftrace.c Split ftrace-based kprobes code from kprobes, and introduce CONFIG_(HAVE_)KPROBES_ON_FTRACE Kconfig flags. For the cleanup reason, this also moves kprobe_ftrace check into skip_singlestep. Link: http://lkml.kernel.org/r/20120928081520.3560.25624.stgit@ltc138.sdl.hitachi.co.jp Cc: Ingo Molnar Cc: Ananth N Mavinakayanahalli Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Frederic Weisbecker Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt --- arch/Kconfig | 12 ++++++ arch/x86/Kconfig | 1 + arch/x86/kernel/Makefile | 1 + arch/x86/kernel/kprobes-common.h | 11 +++++ arch/x86/kernel/kprobes-ftrace.c | 93 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/kprobes.c | 70 +----------------------------- include/linux/kprobes.h | 12 +----- kernel/kprobes.c | 8 ++-- 8 files changed, 125 insertions(+), 83 deletions(-) create mode 100644 arch/x86/kernel/kprobes-ftrace.c diff --git a/arch/Kconfig b/arch/Kconfig index 7f8f281f2585..97fb7d0365d1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -76,6 +76,15 @@ config OPTPROBES depends on KPROBES && HAVE_OPTPROBES depends on !PREEMPT +config KPROBES_ON_FTRACE + def_bool y + depends on KPROBES && HAVE_KPROBES_ON_FTRACE + depends on DYNAMIC_FTRACE_WITH_REGS + help + If function tracer is enabled and the arch supports full + passing of pt_regs to function tracing, then kprobes can + optimize on top of function tracing. 
+ config UPROBES bool "Transparent user-space probes (EXPERIMENTAL)" depends on UPROBE_EVENT && PERF_EVENTS @@ -158,6 +167,9 @@ config HAVE_KRETPROBES config HAVE_OPTPROBES bool +config HAVE_KPROBES_ON_FTRACE + bool + config HAVE_NMI_WATCHDOG bool # diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 996ccecc694c..be8b2b3ab979 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -40,6 +40,7 @@ config X86 select HAVE_DMA_CONTIGUOUS if !SWIOTLB select HAVE_KRETPROBES select HAVE_OPTPROBES + select HAVE_KPROBES_ON_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FENTRY if X86_64 select HAVE_C_RECORDMCOUNT diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 34e923a53762..cc5d31f8830c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_OPTPROBES) += kprobes-opt.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o obj-$(CONFIG_KGDB) += kgdb.o diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h index 3230b68ef29a..2e9d4b5af036 100644 --- a/arch/x86/kernel/kprobes-common.h +++ b/arch/x86/kernel/kprobes-common.h @@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig return addr; } #endif + +#ifdef CONFIG_KPROBES_ON_FTRACE +extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb); +#else +static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + return 0; +} +#endif #endif diff --git a/arch/x86/kernel/kprobes-ftrace.c b/arch/x86/kernel/kprobes-ftrace.c new file mode 100644 index 000000000000..70a81c7aa0a7 --- /dev/null +++ b/arch/x86/kernel/kprobes-ftrace.c @@ -0,0 +1,93 @@ +/* + * Dynamic Ftrace based Kprobes Optimization + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) Hitachi Ltd., 2012 + */ +#include +#include +#include +#include +#include + +#include "kprobes-common.h" + +static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + /* + * Emulate singlestep (and also recover regs->ip) + * as if there is a 5byte nop + */ + regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + __this_cpu_write(current_kprobe, NULL); + return 1; +} + +int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + if (kprobe_ftrace(p)) + return __skip_singlestep(p, regs, kcb); + else + return 0; +} + +/* Ftrace callback handler for kprobes */ +void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + unsigned long flags; + + /* Disable irq for emulating a breakpoint and avoiding preempt */ + local_irq_save(flags); + + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + goto end; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ + regs->ip = ip + sizeof(kprobe_opcode_t); + + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) + __skip_singlestep(p, regs, kcb); + /* + * If pre_handler returns !0, it sets regs->ip and + * resets current kprobe. + */ + } +end: + local_irq_restore(flags); +} + +int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 57916c0d3cf6..18114bfb10f3 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb return 1; } -#ifdef KPROBES_CAN_USE_FTRACE -static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - /* - * Emulate singlestep (and also recover regs->ip) - * as if there is a 5byte nop - */ - regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; - if (unlikely(p->post_handler)) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } - __this_cpu_write(current_kprobe, NULL); -} -#endif - /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. 
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	} else if (kprobe_running()) {
 		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
-#ifdef KPROBES_CAN_USE_FTRACE
-			if (kprobe_ftrace(p)) {
-				skip_singlestep(p, regs, kcb);
-				return 1;
-			}
-#endif
-			setup_singlestep(p, regs, kcb, 0);
+			if (!skip_singlestep(p, regs, kcb))
+				setup_singlestep(p, regs, kcb, 0);
 			return 1;
 		}
 	} /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-#ifdef KPROBES_CAN_USE_FTRACE
-/* Ftrace callback handler for kprobes */
-void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				     struct ftrace_ops *ops, struct pt_regs *regs)
-{
-	struct kprobe *p;
-	struct kprobe_ctlblk *kcb;
-	unsigned long flags;
-
-	/* Disable irq for emulating a breakpoint and avoiding preempt */
-	local_irq_save(flags);
-
-	p = get_kprobe((kprobe_opcode_t *)ip);
-	if (unlikely(!p) || kprobe_disabled(p))
-		goto end;
-
-	kcb = get_kprobe_ctlblk();
-	if (kprobe_running()) {
-		kprobes_inc_nmissed_count(p);
-	} else {
-		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
-		regs->ip = ip + sizeof(kprobe_opcode_t);
-
-		__this_cpu_write(current_kprobe, p);
-		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, regs))
-			skip_singlestep(p, regs, kcb);
-		/*
-		 * If pre_handler returns !0, it sets regs->ip and
-		 * resets current kprobe.
-		 */
-	}
-end:
-	local_irq_restore(flags);
-}
-
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
-{
-	p->ainsn.insn = NULL;
-	p->ainsn.boostable = -1;
-	return 0;
-}
-#endif
-
 int __init arch_init_kprobes(void)
 {
 	return arch_init_optprobes();
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 23755ba42abc..4b6ef4d33cc2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -49,16 +49,6 @@
 #define KPROBE_REENTER		0x00000004
 #define KPROBE_HIT_SSDONE	0x00000008
 
-/*
- * If function tracer is enabled and the arch supports full
- * passing of pt_regs to function tracing, then kprobes can
- * optimize on top of function tracing.
- */
-#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
-	&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
-# define KPROBES_CAN_USE_FTRACE
-#endif
-
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes	__attribute__((__section__(".kprobes.text")))
@@ -316,7 +306,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 #endif /* CONFIG_OPTPROBES */
 
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 				  struct ftrace_ops *ops, struct pt_regs *regs);
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 098f396aa409..f423c3ef4a82 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -919,7 +919,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 	.func = kprobe_ftrace_handler,
 	.flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -964,7 +964,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
 			   (unsigned long)p->addr, 1, 0);
 	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 }
-#else	/* !KPROBES_CAN_USE_FTRACE */
+#else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
 #define arm_kprobe_ftrace(p)	do {} while (0)
 #define disarm_kprobe_ftrace(p)	do {} while (0)
@@ -1414,12 +1414,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
 	 */
 	ftrace_addr = ftrace_location((unsigned long)p->addr);
 	if (ftrace_addr) {
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 		/* Given address is not on the instruction boundary */
 		if ((unsigned long)p->addr != ftrace_addr)
 			return -EILSEQ;
 		p->flags |= KPROBE_FLAG_FTRACE;
-#else	/* !KPROBES_CAN_USE_FTRACE */
+#else	/* !CONFIG_KPROBES_ON_FTRACE */
 		return -EINVAL;
 #endif
 	}
-- 
cgit v1.2.3-59-g8ed1b

From f684199f5de805ac50ea5bdec2b082882586a777 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Fri, 28 Sep 2012 17:15:22 +0900
Subject: kprobes/x86: Move kprobes stuff under arch/x86/kernel/kprobes/

Move arch-dep kprobes stuff under arch/x86/kernel/kprobes.

Link: http://lkml.kernel.org/r/20120928081522.3560.75469.stgit@ltc138.sdl.hitachi.co.jp

Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Frederic Weisbecker
Cc: Ananth N Mavinakayanahalli
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H.
Peter Anvin" Signed-off-by: Masami Hiramatsu [ fixed whitespace and s/__attribute__((packed))/__packed/ ] Signed-off-by: Steven Rostedt --- arch/x86/kernel/Makefile | 4 +- arch/x86/kernel/kprobes-common.h | 113 ---- arch/x86/kernel/kprobes-ftrace.c | 93 ---- arch/x86/kernel/kprobes-opt.c | 512 ------------------ arch/x86/kernel/kprobes.c | 1064 -------------------------------------- arch/x86/kernel/kprobes/Makefile | 7 + arch/x86/kernel/kprobes/common.h | 113 ++++ arch/x86/kernel/kprobes/core.c | 1064 ++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/kprobes/ftrace.c | 93 ++++ arch/x86/kernel/kprobes/opt.c | 512 ++++++++++++++++++ 10 files changed, 1790 insertions(+), 1785 deletions(-) delete mode 100644 arch/x86/kernel/kprobes-common.h delete mode 100644 arch/x86/kernel/kprobes-ftrace.c delete mode 100644 arch/x86/kernel/kprobes-opt.c delete mode 100644 arch/x86/kernel/kprobes.c create mode 100644 arch/x86/kernel/kprobes/Makefile create mode 100644 arch/x86/kernel/kprobes/common.h create mode 100644 arch/x86/kernel/kprobes/core.c create mode 100644 arch/x86/kernel/kprobes/ftrace.c create mode 100644 arch/x86/kernel/kprobes/opt.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index cc5d31f8830c..ac3b3d002833 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -65,9 +65,7 @@ obj-$(CONFIG_X86_TSC) += trace_clock.o obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o -obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_OPTPROBES) += kprobes-opt.o -obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o +obj-y += kprobes/ obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o obj-$(CONFIG_KGDB) += kgdb.o diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h deleted file mode 100644 index 2e9d4b5af036..000000000000 --- a/arch/x86/kernel/kprobes-common.h +++ /dev/null @@ -1,113 +0,0 @@ -#ifndef __X86_KERNEL_KPROBES_COMMON_H -#define __X86_KERNEL_KPROBES_COMMON_H - -/* Kprobes and Optprobes common header */ - -#ifdef CONFIG_X86_64 -#define SAVE_REGS_STRING \ - /* Skip cs, ip, orig_ax. */ \ - " subq $24, %rsp\n" \ - " pushq %rdi\n" \ - " pushq %rsi\n" \ - " pushq %rdx\n" \ - " pushq %rcx\n" \ - " pushq %rax\n" \ - " pushq %r8\n" \ - " pushq %r9\n" \ - " pushq %r10\n" \ - " pushq %r11\n" \ - " pushq %rbx\n" \ - " pushq %rbp\n" \ - " pushq %r12\n" \ - " pushq %r13\n" \ - " pushq %r14\n" \ - " pushq %r15\n" -#define RESTORE_REGS_STRING \ - " popq %r15\n" \ - " popq %r14\n" \ - " popq %r13\n" \ - " popq %r12\n" \ - " popq %rbp\n" \ - " popq %rbx\n" \ - " popq %r11\n" \ - " popq %r10\n" \ - " popq %r9\n" \ - " popq %r8\n" \ - " popq %rax\n" \ - " popq %rcx\n" \ - " popq %rdx\n" \ - " popq %rsi\n" \ - " popq %rdi\n" \ - /* Skip orig_ax, ip, cs */ \ - " addq $24, %rsp\n" -#else -#define SAVE_REGS_STRING \ - /* Skip cs, ip, orig_ax and gs. */ \ - " subl $16, %esp\n" \ - " pushl %fs\n" \ - " pushl %es\n" \ - " pushl %ds\n" \ - " pushl %eax\n" \ - " pushl %ebp\n" \ - " pushl %edi\n" \ - " pushl %esi\n" \ - " pushl %edx\n" \ - " pushl %ecx\n" \ - " pushl %ebx\n" -#define RESTORE_REGS_STRING \ - " popl %ebx\n" \ - " popl %ecx\n" \ - " popl %edx\n" \ - " popl %esi\n" \ - " popl %edi\n" \ - " popl %ebp\n" \ - " popl %eax\n" \ - /* Skip ds, es, fs, gs, orig_ax, and ip. 
Note: don't pop cs here*/\ - " addl $24, %esp\n" -#endif - -/* Ensure if the instruction can be boostable */ -extern int can_boost(kprobe_opcode_t *instruction); -/* Recover instruction if given address is probed */ -extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, - unsigned long addr); -/* - * Copy an instruction and adjust the displacement if the instruction - * uses the %rip-relative addressing mode. - */ -extern int __copy_instruction(u8 *dest, u8 *src); - -/* Generate a relative-jump/call instruction */ -extern void synthesize_reljump(void *from, void *to); -extern void synthesize_relcall(void *from, void *to); - -#ifdef CONFIG_OPTPROBES -extern int arch_init_optprobes(void); -extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); -extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); -#else /* !CONFIG_OPTPROBES */ -static inline int arch_init_optprobes(void) -{ - return 0; -} -static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) -{ - return 0; -} -static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) -{ - return addr; -} -#endif - -#ifdef CONFIG_KPROBES_ON_FTRACE -extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb); -#else -static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - return 0; -} -#endif -#endif diff --git a/arch/x86/kernel/kprobes-ftrace.c b/arch/x86/kernel/kprobes-ftrace.c deleted file mode 100644 index 70a81c7aa0a7..000000000000 --- a/arch/x86/kernel/kprobes-ftrace.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Dynamic Ftrace based Kprobes Optimization - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- * - * Copyright (C) Hitachi Ltd., 2012 - */ -#include -#include -#include -#include -#include - -#include "kprobes-common.h" - -static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - /* - * Emulate singlestep (and also recover regs->ip) - * as if there is a 5byte nop - */ - regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; - if (unlikely(p->post_handler)) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } - __this_cpu_write(current_kprobe, NULL); - return 1; -} - -int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb); - else - return 0; -} - -/* Ftrace callback handler for kprobes */ -void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct pt_regs *regs) -{ - struct kprobe *p; - struct kprobe_ctlblk *kcb; - unsigned long flags; - - /* Disable irq for emulating a breakpoint and avoiding preempt */ - local_irq_save(flags); - - p = get_kprobe((kprobe_opcode_t *)ip); - if (unlikely(!p) || kprobe_disabled(p)) - goto end; - - kcb = get_kprobe_ctlblk(); - if (kprobe_running()) { - kprobes_inc_nmissed_count(p); - } else { - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ - regs->ip = ip + sizeof(kprobe_opcode_t); - - __this_cpu_write(current_kprobe, p); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) - __skip_singlestep(p, regs, kcb); - /* - * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe. - */ - } -end: - local_irq_restore(flags); -} - -int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) -{ - p->ainsn.insn = NULL; - p->ainsn.boostable = -1; - return 0; -} diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c deleted file mode 100644 index c5e410eed403..000000000000 --- a/arch/x86/kernel/kprobes-opt.c +++ /dev/null @@ -1,512 +0,0 @@ -/* - * Kernel Probes Jump Optimization (Optprobes) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- * - * Copyright (C) IBM Corporation, 2002, 2004 - * Copyright (C) Hitachi Ltd., 2012 - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "kprobes-common.h" - -unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) -{ - struct optimized_kprobe *op; - struct kprobe *kp; - long offs; - int i; - - for (i = 0; i < RELATIVEJUMP_SIZE; i++) { - kp = get_kprobe((void *)addr - i); - /* This function only handles jump-optimized kprobe */ - if (kp && kprobe_optimized(kp)) { - op = container_of(kp, struct optimized_kprobe, kp); - /* If op->list is not empty, op is under optimizing */ - if (list_empty(&op->list)) - goto found; - } - } - - return addr; -found: - /* - * If the kprobe can be optimized, original bytes which can be - * overwritten by jump destination address. In this case, original - * bytes must be recovered from op->optinsn.copied_insn buffer. - */ - memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - if (addr == (unsigned long)kp->addr) { - buf[0] = kp->opcode; - memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - } else { - offs = addr - (unsigned long)kp->addr - 1; - memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); - } - - return (unsigned long)buf; -} - -/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ -static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) -{ -#ifdef CONFIG_X86_64 - *addr++ = 0x48; - *addr++ = 0xbf; -#else - *addr++ = 0xb8; -#endif - *(unsigned long *)addr = val; -} - -static void __used __kprobes kprobes_optinsn_template_holder(void) -{ - asm volatile ( - ".global optprobe_template_entry\n" - "optprobe_template_entry:\n" -#ifdef CONFIG_X86_64 - /* We don't bother saving the ss register */ - " pushq %rsp\n" - " pushfq\n" - SAVE_REGS_STRING - " movq %rsp, %rsi\n" - ".global optprobe_template_val\n" - "optprobe_template_val:\n" - ASM_NOP5 - ASM_NOP5 - ".global optprobe_template_call\n" - "optprobe_template_call:\n" - ASM_NOP5 - /* Move flags to rsp */ - " movq 144(%rsp), %rdx\n" - " movq %rdx, 152(%rsp)\n" - RESTORE_REGS_STRING - /* Skip flags entry */ - " addq $8, %rsp\n" - " popfq\n" -#else /* CONFIG_X86_32 */ - " pushf\n" - SAVE_REGS_STRING - " movl %esp, %edx\n" - ".global optprobe_template_val\n" - "optprobe_template_val:\n" - ASM_NOP5 - ".global optprobe_template_call\n" - "optprobe_template_call:\n" - ASM_NOP5 - RESTORE_REGS_STRING - " addl $4, %esp\n" /* skip cs */ - " popf\n" -#endif - ".global optprobe_template_end\n" - "optprobe_template_end:\n"); -} - -#define TMPL_MOVE_IDX \ - ((long)&optprobe_template_val - (long)&optprobe_template_entry) -#define TMPL_CALL_IDX \ - ((long)&optprobe_template_call - (long)&optprobe_template_entry) -#define TMPL_END_IDX \ - ((long)&optprobe_template_end - (long)&optprobe_template_entry) - -#define INT3_SIZE sizeof(kprobe_opcode_t) - -/* Optimized kprobe call back function: called from optinsn */ -static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long flags; - - /* This is possible if op is under delayed unoptimizing */ - if (kprobe_disabled(&op->kp)) - return; - - local_irq_save(flags); - if (kprobe_running()) { - kprobes_inc_nmissed_count(&op->kp); - } else { - /* Save skipped registers */ -#ifdef CONFIG_X86_64 - regs->cs = __KERNEL_CS; -#else - 
regs->cs = __KERNEL_CS | get_kernel_rpl(); - regs->gs = 0; -#endif - regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; - regs->orig_ax = ~0UL; - - __this_cpu_write(current_kprobe, &op->kp); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; - opt_pre_handler(&op->kp, regs); - __this_cpu_write(current_kprobe, NULL); - } - local_irq_restore(flags); -} - -static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) -{ - int len = 0, ret; - - while (len < RELATIVEJUMP_SIZE) { - ret = __copy_instruction(dest + len, src + len); - if (!ret || !can_boost(dest + len)) - return -EINVAL; - len += ret; - } - /* Check whether the address range is reserved */ - if (ftrace_text_reserved(src, src + len - 1) || - alternatives_text_reserved(src, src + len - 1) || - jump_label_text_reserved(src, src + len - 1)) - return -EBUSY; - - return len; -} - -/* Check whether insn is indirect jump */ -static int __kprobes insn_is_indirect_jump(struct insn *insn) -{ - return ((insn->opcode.bytes[0] == 0xff && - (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ - insn->opcode.bytes[0] == 0xea); /* Segment based jump */ -} - -/* Check whether insn jumps into specified address range */ -static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) -{ - unsigned long target = 0; - - switch (insn->opcode.bytes[0]) { - case 0xe0: /* loopne */ - case 0xe1: /* loope */ - case 0xe2: /* loop */ - case 0xe3: /* jcxz */ - case 0xe9: /* near relative jump */ - case 0xeb: /* short relative jump */ - break; - case 0x0f: - if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ - break; - return 0; - default: - if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ - break; - return 0; - } - target = (unsigned long)insn->next_byte + insn->immediate.value; - - return (start <= target && target <= start + len); -} - -/* Decode whole function to ensure any instructions don't jump into target */ -static int __kprobes can_optimize(unsigned long paddr) -{ - unsigned long addr, size = 0, offset = 0; - struct insn insn; - kprobe_opcode_t buf[MAX_INSN_SIZE]; - - /* Lookup symbol including addr */ - if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) - return 0; - - /* - * Do not optimize in the entry code due to the unstable - * stack handling. - */ - if ((paddr >= (unsigned long)__entry_text_start) && - (paddr < (unsigned long)__entry_text_end)) - return 0; - - /* Check there is enough space for a relative jump. */ - if (size - offset < RELATIVEJUMP_SIZE) - return 0; - - /* Decode instructions */ - addr = paddr - offset; - while (addr < paddr - offset + size) { /* Decode until function end */ - if (search_exception_tables(addr)) - /* - * Since some fixup code will jumps into this function, - * we can't optimize kprobe in this function. - */ - return 0; - kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); - insn_get_length(&insn); - /* Another subsystem puts a breakpoint */ - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) - return 0; - /* Recover address */ - insn.kaddr = (void *)addr; - insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_SIZE, - RELATIVE_ADDR_SIZE)) - return 0; - addr += insn.length; - } - - return 1; -} - -/* Check optimized_kprobe can actually be optimized. 
*/ -int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) -{ - int i; - struct kprobe *p; - - for (i = 1; i < op->optinsn.size; i++) { - p = get_kprobe(op->kp.addr + i); - if (p && !kprobe_disabled(p)) - return -EEXIST; - } - - return 0; -} - -/* Check the addr is within the optimized instructions. */ -int __kprobes -arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) -{ - return ((unsigned long)op->kp.addr <= addr && - (unsigned long)op->kp.addr + op->optinsn.size > addr); -} - -/* Free optimized instruction slot */ -static __kprobes -void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) -{ - if (op->optinsn.insn) { - free_optinsn_slot(op->optinsn.insn, dirty); - op->optinsn.insn = NULL; - op->optinsn.size = 0; - } -} - -void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) -{ - __arch_remove_optimized_kprobe(op, 1); -} - -/* - * Copy replacing target instructions - * Target instructions MUST be relocatable (checked inside) - * This is called when new aggr(opt)probe is allocated or reused. - */ -int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) -{ - u8 *buf; - int ret; - long rel; - - if (!can_optimize((unsigned long)op->kp.addr)) - return -EILSEQ; - - op->optinsn.insn = get_optinsn_slot(); - if (!op->optinsn.insn) - return -ENOMEM; - - /* - * Verify if the address gap is in 2GB range, because this uses - * a relative jump. - */ - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; - if (abs(rel) > 0x7fffffff) - return -ERANGE; - - buf = (u8 *)op->optinsn.insn; - - /* Copy instructions into the out-of-line buffer */ - ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); - if (ret < 0) { - __arch_remove_optimized_kprobe(op, 0); - return ret; - } - op->optinsn.size = ret; - - /* Copy arch-dep-instance from template */ - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); - - /* Set probe information */ - synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); - - /* Set probe function call */ - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); - - /* Set returning jmp instruction at the tail of out-of-line buffer */ - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, - (u8 *)op->kp.addr + op->optinsn.size); - - flush_icache_range((unsigned long) buf, - (unsigned long) buf + TMPL_END_IDX + - op->optinsn.size + RELATIVEJUMP_SIZE); - return 0; -} - -#define MAX_OPTIMIZE_PROBES 256 -static struct text_poke_param *jump_poke_params; -static struct jump_poke_buffer { - u8 buf[RELATIVEJUMP_SIZE]; -} *jump_poke_bufs; - -static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, - u8 *insn_buf, - struct optimized_kprobe *op) -{ - s32 rel = (s32)((long)op->optinsn.insn - - ((long)op->kp.addr + RELATIVEJUMP_SIZE)); - - /* Backup instructions which will be replaced by jump address */ - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, - RELATIVE_ADDR_SIZE); - - insn_buf[0] = RELATIVEJUMP_OPCODE; - *(s32 *)(&insn_buf[1]) = rel; - - tprm->addr = op->kp.addr; - tprm->opcode = insn_buf; - tprm->len = RELATIVEJUMP_SIZE; -} - -/* - * Replace breakpoints (int3) with relative jumps. - * Caller must call with locking kprobe_mutex and text_mutex. 
- */ -void __kprobes arch_optimize_kprobes(struct list_head *oplist) -{ - struct optimized_kprobe *op, *tmp; - int c = 0; - - list_for_each_entry_safe(op, tmp, oplist, list) { - WARN_ON(kprobe_disabled(&op->kp)); - /* Setup param */ - setup_optimize_kprobe(&jump_poke_params[c], - jump_poke_bufs[c].buf, op); - list_del_init(&op->list); - if (++c >= MAX_OPTIMIZE_PROBES) - break; - } - - /* - * text_poke_smp doesn't support NMI/MCE code modifying. - * However, since kprobes itself also doesn't support NMI/MCE - * code probing, it's not a problem. - */ - text_poke_smp_batch(jump_poke_params, c); -} - -static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, - u8 *insn_buf, - struct optimized_kprobe *op) -{ - /* Set int3 to first byte for kprobes */ - insn_buf[0] = BREAKPOINT_INSTRUCTION; - memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - - tprm->addr = op->kp.addr; - tprm->opcode = insn_buf; - tprm->len = RELATIVEJUMP_SIZE; -} - -/* - * Recover original instructions and breakpoints from relative jumps. - * Caller must call with locking kprobe_mutex. - */ -extern void arch_unoptimize_kprobes(struct list_head *oplist, - struct list_head *done_list) -{ - struct optimized_kprobe *op, *tmp; - int c = 0; - - list_for_each_entry_safe(op, tmp, oplist, list) { - /* Setup param */ - setup_unoptimize_kprobe(&jump_poke_params[c], - jump_poke_bufs[c].buf, op); - list_move(&op->list, done_list); - if (++c >= MAX_OPTIMIZE_PROBES) - break; - } - - /* - * text_poke_smp doesn't support NMI/MCE code modifying. - * However, since kprobes itself also doesn't support NMI/MCE - * code probing, it's not a problem. - */ - text_poke_smp_batch(jump_poke_params, c); -} - -/* Replace a relative jump with a breakpoint (int3). */ -void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) -{ - u8 buf[RELATIVEJUMP_SIZE]; - - /* Set int3 to first byte for kprobes */ - buf[0] = BREAKPOINT_INSTRUCTION; - memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); -} - -int __kprobes -setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) -{ - struct optimized_kprobe *op; - - if (p->flags & KPROBE_FLAG_OPTIMIZED) { - /* This kprobe is really able to run optimized path. */ - op = container_of(p, struct optimized_kprobe, kp); - /* Detour through copied instructions */ - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; - if (!reenter) - reset_current_kprobe(); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - -int __kprobes arch_init_optprobes(void) -{ - /* Allocate code buffer and parameter array */ - jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * - MAX_OPTIMIZE_PROBES, GFP_KERNEL); - if (!jump_poke_bufs) - return -ENOMEM; - - jump_poke_params = kmalloc(sizeof(struct text_poke_param) * - MAX_OPTIMIZE_PROBES, GFP_KERNEL); - if (!jump_poke_params) { - kfree(jump_poke_bufs); - jump_poke_bufs = NULL; - return -ENOMEM; - } - - return 0; -} diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c deleted file mode 100644 index 18114bfb10f3..000000000000 --- a/arch/x86/kernel/kprobes.c +++ /dev/null @@ -1,1064 +0,0 @@ -/* - * Kernel Probes (KProbes) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2002, 2004 - * - * 2002-Oct Created by Vamsi Krishna S Kernel - * Probes initial implementation ( includes contributions from - * Rusty Russell). - * 2004-July Suparna Bhattacharya added jumper probes - * interface to access function arguments. - * 2004-Oct Jim Keniston and Prasanna S Panchamukhi - * adapted for x86_64 from i386. - * 2005-Mar Roland McGrath - * Fixed to handle %rip-relative addressing mode correctly. - * 2005-May Hien Nguyen , Jim Keniston - * and Prasanna S Panchamukhi - * added function-return probes. - * 2005-May Rusty Lynch - * Added function return probes functionality - * 2006-Feb Masami Hiramatsu added - * kprobe-booster and kretprobe-booster for i386. - * 2007-Dec Masami Hiramatsu added kprobe-booster - * and kretprobe-booster for x86-64 - * 2007-Dec Masami Hiramatsu , Arjan van de Ven - * and Jim Keniston - * unified x86 kprobes code. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "kprobes-common.h" - -void jprobe_return_end(void); - -DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; -DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); - -#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs)) - -#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ - (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ - (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \ - (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \ - (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \ - << (row % 32)) - /* - * Undefined/reserved opcodes, conditional jump, Opcode Extension - * Groups, and some special opcodes can not boost. - * This is non-const and volatile to keep gcc from statically - * optimizing it out, as variable_test_bit makes gcc think only - * *(unsigned long*) is used. 
- */ -static volatile u32 twobyte_is_boostable[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ---------------------------------------------- */ - W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ - W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ - W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ - W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ - W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ - W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */ - W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ - W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ - W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */ - W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ - W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */ - W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */ - W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */ - /* ----------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; -#undef W - -struct kretprobe_blackpoint kretprobe_blacklist[] = { - {"__switch_to", }, /* This function switches only current task, but - doesn't switch kernel stack.*/ - {NULL, NULL} /* Terminator */ -}; - -const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); - -static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) -{ - struct __arch_relative_insn { - u8 op; - s32 raddr; - } __attribute__((packed)) *insn; - - insn = (struct __arch_relative_insn *)from; - insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); - insn->op = op; -} - -/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ -void __kprobes synthesize_reljump(void *from, void *to) -{ - __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); -} - -/* Insert a call instruction at address 'from', which calls address 'to'.*/ -void __kprobes synthesize_relcall(void *from, void *to) -{ - __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); -} - -/* - * Skip the prefixes of the instruction. - */ -static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) -{ - insn_attr_t attr; - - attr = inat_get_opcode_attribute((insn_byte_t)*insn); - while (inat_is_legacy_prefix(attr)) { - insn++; - attr = inat_get_opcode_attribute((insn_byte_t)*insn); - } -#ifdef CONFIG_X86_64 - if (inat_is_rex_prefix(attr)) - insn++; -#endif - return insn; -} - -/* - * Returns non-zero if opcode is boostable. - * RIP relative instructions are adjusted at copying time in 64 bits mode - */ -int __kprobes can_boost(kprobe_opcode_t *opcodes) -{ - kprobe_opcode_t opcode; - kprobe_opcode_t *orig_opcodes = opcodes; - - if (search_exception_tables((unsigned long)opcodes)) - return 0; /* Page fault may occur on this address. 
*/ - -retry: - if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) - return 0; - opcode = *(opcodes++); - - /* 2nd-byte opcode */ - if (opcode == 0x0f) { - if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) - return 0; - return test_bit(*opcodes, - (unsigned long *)twobyte_is_boostable); - } - - switch (opcode & 0xf0) { -#ifdef CONFIG_X86_64 - case 0x40: - goto retry; /* REX prefix is boostable */ -#endif - case 0x60: - if (0x63 < opcode && opcode < 0x67) - goto retry; /* prefixes */ - /* can't boost Address-size override and bound */ - return (opcode != 0x62 && opcode != 0x67); - case 0x70: - return 0; /* can't boost conditional jump */ - case 0xc0: - /* can't boost software-interruptions */ - return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; - case 0xd0: - /* can boost AA* and XLAT */ - return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); - case 0xe0: - /* can boost in/out and absolute jmps */ - return ((opcode & 0x04) || opcode == 0xea); - case 0xf0: - if ((opcode & 0x0c) == 0 && opcode != 0xf1) - goto retry; /* lock/rep(ne) prefix */ - /* clear and set flags are boostable */ - return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); - default: - /* segment override prefixes are boostable */ - if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e) - goto retry; /* prefixes */ - /* CS override prefix and call are not boostable */ - return (opcode != 0x2e && opcode != 0x9a); - } -} - -static unsigned long -__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) -{ - struct kprobe *kp; - - kp = get_kprobe((void *)addr); - /* There is no probe, return original address */ - if (!kp) - return addr; - - /* - * Basically, kp->ainsn.insn has an original instruction. - * However, RIP-relative instruction can not do single-stepping - * at different place, __copy_instruction() tweaks the displacement of - * that instruction. In that case, we can't recover the instruction - * from the kp->ainsn.insn. - * - * On the other hand, kp->opcode has a copy of the first byte of - * the probed instruction, which is overwritten by int3. And - * the instruction at kp->addr is not modified by kprobes except - * for the first byte, we can recover the original instruction - * from it and kp->opcode. - */ - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - buf[0] = kp->opcode; - return (unsigned long)buf; -} - -/* - * Recover the probed instruction at addr for further analysis. - * Caller must lock kprobes by kprobe_mutex, or disable preemption - * for preventing to release referencing kprobes. - */ -unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) -{ - unsigned long __addr; - - __addr = __recover_optprobed_insn(buf, addr); - if (__addr != addr) - return __addr; - - return __recover_probed_insn(buf, addr); -} - -/* Check if paddr is at an instruction boundary */ -static int __kprobes can_probe(unsigned long paddr) -{ - unsigned long addr, __addr, offset = 0; - struct insn insn; - kprobe_opcode_t buf[MAX_INSN_SIZE]; - - if (!kallsyms_lookup_size_offset(paddr, NULL, &offset)) - return 0; - - /* Decode instructions */ - addr = paddr - offset; - while (addr < paddr) { - /* - * Check if the instruction has been modified by another - * kprobe, in which case we replace the breakpoint by the - * original instruction in our buffer. - * Also, jump optimization will change the breakpoint to - * relative-jump. Since the relative-jump itself is - * normally used, we just go through if there is no kprobe. 
- */ - __addr = recover_probed_instruction(buf, addr); - kernel_insn_init(&insn, (void *)__addr); - insn_get_length(&insn); - - /* - * Another debugging subsystem might insert this breakpoint. - * In that case, we can't recover it. - */ - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) - return 0; - addr += insn.length; - } - - return (addr == paddr); -} - -/* - * Returns non-zero if opcode modifies the interrupt flag. - */ -static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) -{ - /* Skip prefixes */ - insn = skip_prefixes(insn); - - switch (*insn) { - case 0xfa: /* cli */ - case 0xfb: /* sti */ - case 0xcf: /* iret/iretd */ - case 0x9d: /* popf/popfd */ - return 1; - } - - return 0; -} - -/* - * Copy an instruction and adjust the displacement if the instruction - * uses the %rip-relative addressing mode. - * If it does, Return the address of the 32-bit displacement word. - * If not, return null. - * Only applicable to 64-bit x86. - */ -int __kprobes __copy_instruction(u8 *dest, u8 *src) -{ - struct insn insn; - kprobe_opcode_t buf[MAX_INSN_SIZE]; - - kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); - insn_get_length(&insn); - /* Another subsystem puts a breakpoint, failed to recover */ - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) - return 0; - memcpy(dest, insn.kaddr, insn.length); - -#ifdef CONFIG_X86_64 - if (insn_rip_relative(&insn)) { - s64 newdisp; - u8 *disp; - kernel_insn_init(&insn, dest); - insn_get_displacement(&insn); - /* - * The copied instruction uses the %rip-relative addressing - * mode. Adjust the displacement for the difference between - * the original location of this instruction and the location - * of the copy that will actually be run. The tricky bit here - * is making sure that the sign extension happens correctly in - * this calculation, since we need a signed 32-bit result to - * be sign-extended to 64 bits when it's added to the %rip - * value and yield the same 64-bit result that the sign- - * extension of the original signed 32-bit displacement would - * have given. - */ - newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; - BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ - disp = (u8 *) dest + insn_offset_displacement(&insn); - *(s32 *) disp = (s32) newdisp; - } -#endif - return insn.length; -} - -static void __kprobes arch_copy_kprobe(struct kprobe *p) -{ - /* Copy an instruction with recovering if other optprobe modifies it.*/ - __copy_instruction(p->ainsn.insn, p->addr); - - /* - * __copy_instruction can modify the displacement of the instruction, - * but it doesn't affect boostable check. - */ - if (can_boost(p->ainsn.insn)) - p->ainsn.boostable = 0; - else - p->ainsn.boostable = -1; - - /* Also, displacement change doesn't affect the first byte */ - p->opcode = p->ainsn.insn[0]; -} - -int __kprobes arch_prepare_kprobe(struct kprobe *p) -{ - if (alternatives_text_reserved(p->addr, p->addr)) - return -EINVAL; - - if (!can_probe((unsigned long)p->addr)) - return -EILSEQ; - /* insn: must be on special executable page on x86. 
*/ - p->ainsn.insn = get_insn_slot(); - if (!p->ainsn.insn) - return -ENOMEM; - arch_copy_kprobe(p); - return 0; -} - -void __kprobes arch_arm_kprobe(struct kprobe *p) -{ - text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); -} - -void __kprobes arch_disarm_kprobe(struct kprobe *p) -{ - text_poke(p->addr, &p->opcode, 1); -} - -void __kprobes arch_remove_kprobe(struct kprobe *p) -{ - if (p->ainsn.insn) { - free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); - p->ainsn.insn = NULL; - } -} - -static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) -{ - kcb->prev_kprobe.kp = kprobe_running(); - kcb->prev_kprobe.status = kcb->kprobe_status; - kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags; - kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; -} - -static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) -{ - __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); - kcb->kprobe_status = kcb->prev_kprobe.status; - kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; - kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; -} - -static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - __this_cpu_write(current_kprobe, p); - kcb->kprobe_saved_flags = kcb->kprobe_old_flags - = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); - if (is_IF_modifier(p->ainsn.insn)) - kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; -} - -static void __kprobes clear_btf(void) -{ - if (test_thread_flag(TIF_BLOCKSTEP)) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl &= ~DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - } -} - -static void __kprobes restore_btf(void) -{ - if (test_thread_flag(TIF_BLOCKSTEP)) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl |= DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - } -} - -void __kprobes -arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) -{ - unsigned long *sara = stack_addr(regs); - - ri->ret_addr = (kprobe_opcode_t *) *sara; - - /* Replace the return addr with trampoline addr */ - *sara = (unsigned long) &kretprobe_trampoline; -} - -static void __kprobes -setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) -{ - if (setup_detour_execution(p, regs, reenter)) - return; - -#if !defined(CONFIG_PREEMPT) - if (p->ainsn.boostable == 1 && !p->post_handler) { - /* Boost up -- we can execute copied instructions directly */ - if (!reenter) - reset_current_kprobe(); - /* - * Reentering boosted probe doesn't reset current_kprobe, - * nor set current_kprobe, because it doesn't use single - * stepping. - */ - regs->ip = (unsigned long)p->ainsn.insn; - preempt_enable_no_resched(); - return; - } -#endif - if (reenter) { - save_previous_kprobe(kcb); - set_current_kprobe(p, regs, kcb); - kcb->kprobe_status = KPROBE_REENTER; - } else - kcb->kprobe_status = KPROBE_HIT_SS; - /* Prepare real single stepping */ - clear_btf(); - regs->flags |= X86_EFLAGS_TF; - regs->flags &= ~X86_EFLAGS_IF; - /* single step inline if the instruction is an int3 */ - if (p->opcode == BREAKPOINT_INSTRUCTION) - regs->ip = (unsigned long)p->addr; - else - regs->ip = (unsigned long)p->ainsn.insn; -} - -/* - * We have reentered the kprobe_handler(), since another probe was hit while - * within the handler. We save the original kprobes variables and just single - * step on the instruction of the new probe without calling any user handlers. 
- */ -static int __kprobes -reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) -{ - switch (kcb->kprobe_status) { - case KPROBE_HIT_SSDONE: - case KPROBE_HIT_ACTIVE: - kprobes_inc_nmissed_count(p); - setup_singlestep(p, regs, kcb, 1); - break; - case KPROBE_HIT_SS: - /* A probe has been hit in the codepath leading up to, or just - * after, single-stepping of a probed instruction. This entire - * codepath should strictly reside in .kprobes.text section. - * Raise a BUG or we'll continue in an endless reentering loop - * and eventually a stack overflow. - */ - printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", - p->addr); - dump_kprobe(p); - BUG(); - default: - /* impossible cases */ - WARN_ON(1); - return 0; - } - - return 1; -} - -/* - * Interrupts are disabled on entry as trap3 is an interrupt gate and they - * remain disabled throughout this function. - */ -static int __kprobes kprobe_handler(struct pt_regs *regs) -{ - kprobe_opcode_t *addr; - struct kprobe *p; - struct kprobe_ctlblk *kcb; - - addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); - /* - * We don't want to be preempted for the entire - * duration of kprobe processing. We conditionally - * re-enable preemption at the end of this function, - * and also in reenter_kprobe() and setup_singlestep(). - */ - preempt_disable(); - - kcb = get_kprobe_ctlblk(); - p = get_kprobe(addr); - - if (p) { - if (kprobe_running()) { - if (reenter_kprobe(p, regs, kcb)) - return 1; - } else { - set_current_kprobe(p, regs, kcb); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; - - /* - * If we have no pre-handler or it returned 0, we - * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry - * for jprobe processing, so get out doing nothing - * more here. - */ - if (!p->pre_handler || !p->pre_handler(p, regs)) - setup_singlestep(p, regs, kcb, 0); - return 1; - } - } else if (*addr != BREAKPOINT_INSTRUCTION) { - /* - * The breakpoint instruction was removed right - * after we hit it. Another cpu has removed - * either a probepoint or a debugger breakpoint - * at this address. In either case, no further - * handling of this interrupt is appropriate. - * Back up over the (now missing) int3 and run - * the original instruction. - */ - regs->ip = (unsigned long)addr; - preempt_enable_no_resched(); - return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - if (!skip_singlestep(p, regs, kcb)) - setup_singlestep(p, regs, kcb, 0); - return 1; - } - } /* else: not a kprobe fault; let the kernel handle it */ - - preempt_enable_no_resched(); - return 0; -} - -/* - * When a retprobed function returns, this code saves registers and - * calls trampoline_handler() runs, which calls the kretprobe's handler. - */ -static void __used __kprobes kretprobe_trampoline_holder(void) -{ - asm volatile ( - ".global kretprobe_trampoline\n" - "kretprobe_trampoline: \n" -#ifdef CONFIG_X86_64 - /* We don't bother saving the ss register */ - " pushq %rsp\n" - " pushfq\n" - SAVE_REGS_STRING - " movq %rsp, %rdi\n" - " call trampoline_handler\n" - /* Replace saved sp with true return address. 
*/ - " movq %rax, 152(%rsp)\n" - RESTORE_REGS_STRING - " popfq\n" -#else - " pushf\n" - SAVE_REGS_STRING - " movl %esp, %eax\n" - " call trampoline_handler\n" - /* Move flags to cs */ - " movl 56(%esp), %edx\n" - " movl %edx, 52(%esp)\n" - /* Replace saved flags with true return address. */ - " movl %eax, 56(%esp)\n" - RESTORE_REGS_STRING - " popf\n" -#endif - " ret\n"); -} - -/* - * Called from kretprobe_trampoline - */ -static __used __kprobes void *trampoline_handler(struct pt_regs *regs) -{ - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *node, *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - /* fixup registers */ -#ifdef CONFIG_X86_64 - regs->cs = __KERNEL_CS; -#else - regs->cs = __KERNEL_CS | get_kernel_rpl(); - regs->gs = 0; -#endif - regs->ip = trampoline_address; - regs->orig_ax = ~0UL; - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. - */ - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; -} - -/* - * Called after single-stepping. p->addr is the address of the - * instruction whose first byte has been replaced by the "int 3" - * instruction. To avoid the SMP problems that can occur when we - * temporarily put back the original opcode to single-step, we - * single-stepped a copy of the instruction. The address of this - * copy is p->ainsn.insn. - * - * This function prepares to return from the post-single-step - * interrupt. 
We have to fix up the stack as follows: - * - * 0) Except in the case of absolute or indirect jump or call instructions, - * the new ip is relative to the copied instruction. We need to make - * it relative to the original instruction. - * - * 1) If the single-stepped instruction was pushfl, then the TF and IF - * flags are set in the just-pushed flags, and may need to be cleared. - * - * 2) If the single-stepped instruction was a call, the return address - * that is atop the stack is the address following the copied instruction. - * We need to make it the address following the original instruction. - * - * If this is the first time we've single-stepped the instruction at - * this probepoint, and the instruction is boostable, boost it: add a - * jump instruction after the copied instruction, that jumps to the next - * instruction after the probepoint. - */ -static void __kprobes -resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) -{ - unsigned long *tos = stack_addr(regs); - unsigned long copy_ip = (unsigned long)p->ainsn.insn; - unsigned long orig_ip = (unsigned long)p->addr; - kprobe_opcode_t *insn = p->ainsn.insn; - - /* Skip prefixes */ - insn = skip_prefixes(insn); - - regs->flags &= ~X86_EFLAGS_TF; - switch (*insn) { - case 0x9c: /* pushfl */ - *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF); - *tos |= kcb->kprobe_old_flags; - break; - case 0xc2: /* iret/ret/lret */ - case 0xc3: - case 0xca: - case 0xcb: - case 0xcf: - case 0xea: /* jmp absolute -- ip is correct */ - /* ip is already adjusted, no more changes required */ - p->ainsn.boostable = 1; - goto no_change; - case 0xe8: /* call relative - Fix return addr */ - *tos = orig_ip + (*tos - copy_ip); - break; -#ifdef CONFIG_X86_32 - case 0x9a: /* call absolute -- same as call absolute, indirect */ - *tos = orig_ip + (*tos - copy_ip); - goto no_change; -#endif - case 0xff: - if ((insn[1] & 0x30) == 0x10) { - /* - * call absolute, indirect - * Fix return addr; ip is correct. - * But this is not boostable - */ - *tos = orig_ip + (*tos - copy_ip); - goto no_change; - } else if (((insn[1] & 0x31) == 0x20) || - ((insn[1] & 0x31) == 0x21)) { - /* - * jmp near and far, absolute indirect - * ip is correct. And this is boostable - */ - p->ainsn.boostable = 1; - goto no_change; - } - default: - break; - } - - if (p->ainsn.boostable == 0) { - if ((regs->ip > copy_ip) && - (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) { - /* - * These instructions can be executed directly if it - * jumps back to correct address. - */ - synthesize_reljump((void *)regs->ip, - (void *)orig_ip + (regs->ip - copy_ip)); - p->ainsn.boostable = 1; - } else { - p->ainsn.boostable = -1; - } - } - - regs->ip += orig_ip - copy_ip; - -no_change: - restore_btf(); -} - -/* - * Interrupts are disabled on entry as trap1 is an interrupt gate and they - * remain disabled throughout this function. - */ -static int __kprobes post_kprobe_handler(struct pt_regs *regs) -{ - struct kprobe *cur = kprobe_running(); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (!cur) - return 0; - - resume_execution(cur, regs, kcb); - regs->flags |= kcb->kprobe_saved_flags; - - if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - cur->post_handler(cur, regs, 0); - } - - /* Restore back the original saved kprobes variables and continue. 
*/ - if (kcb->kprobe_status == KPROBE_REENTER) { - restore_previous_kprobe(kcb); - goto out; - } - reset_current_kprobe(); -out: - preempt_enable_no_resched(); - - /* - * if somebody else is singlestepping across a probe point, flags - * will have TF set, in which case, continue the remaining processing - * of do_debug, as if this is not a probe hit. - */ - if (regs->flags & X86_EFLAGS_TF) - return 0; - - return 1; -} - -int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) -{ - struct kprobe *cur = kprobe_running(); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - switch (kcb->kprobe_status) { - case KPROBE_HIT_SS: - case KPROBE_REENTER: - /* - * We are here because the instruction being single - * stepped caused a page fault. We reset the current - * kprobe and the ip points back to the probe address - * and allow the page fault handler to continue as a - * normal page fault. - */ - regs->ip = (unsigned long)cur->addr; - regs->flags |= kcb->kprobe_old_flags; - if (kcb->kprobe_status == KPROBE_REENTER) - restore_previous_kprobe(kcb); - else - reset_current_kprobe(); - preempt_enable_no_resched(); - break; - case KPROBE_HIT_ACTIVE: - case KPROBE_HIT_SSDONE: - /* - * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accounting - * these specific fault cases. - */ - kprobes_inc_nmissed_count(cur); - - /* - * We come here because instructions in the pre/post - * handler caused the page_fault, this could happen - * if handler tries to access user space by - * copy_from_user(), get_user() etc. Let the - * user-specified handler try to fix it first. - */ - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - - /* - * In case the user-specified fault handler returned - * zero, try to fix up. - */ - if (fixup_exception(regs)) - return 1; - - /* - * fixup routine could not handle it, - * Let do_page_fault() fix it. - */ - break; - default: - break; - } - return 0; -} - -/* - * Wrapper routine for handling exceptions. - */ -int __kprobes -kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) -{ - struct die_args *args = data; - int ret = NOTIFY_DONE; - - if (args->regs && user_mode_vm(args->regs)) - return ret; - - switch (val) { - case DIE_INT3: - if (kprobe_handler(args->regs)) - ret = NOTIFY_STOP; - break; - case DIE_DEBUG: - if (post_kprobe_handler(args->regs)) { - /* - * Reset the BS bit in dr6 (pointed by args->err) to - * denote completion of processing - */ - (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; - ret = NOTIFY_STOP; - } - break; - case DIE_GPF: - /* - * To be potentially processing a kprobe fault and to - * trust the result from kprobe_running(), we have - * be non-preemptible. - */ - if (!preemptible() && kprobe_running() && - kprobe_fault_handler(args->regs, args->trapnr)) - ret = NOTIFY_STOP; - break; - default: - break; - } - return ret; -} - -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_sp = stack_addr(regs); - addr = (unsigned long)(kcb->jprobe_saved_sp); - - /* - * As Linus pointed out, gcc assumes that the callee - * owns the argument space and could overwrite it, e.g. - * tailcall optimization. So, to be absolutely safe - * we also save and restore enough stack bytes to cover - * the argument area. 
- */ - memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, - MIN_STACK_SIZE(addr)); - regs->flags &= ~X86_EFLAGS_IF; - trace_hardirqs_off(); - regs->ip = (unsigned long)(jp->entry); - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - asm volatile ( -#ifdef CONFIG_X86_64 - " xchg %%rbx,%%rsp \n" -#else - " xchgl %%ebx,%%esp \n" -#endif - " int3 \n" - " .globl jprobe_return_end\n" - " jprobe_return_end: \n" - " nop \n"::"b" - (kcb->jprobe_saved_sp):"memory"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - u8 *addr = (u8 *) (regs->ip - 1); - struct jprobe *jp = container_of(p, struct jprobe, kp); - - if ((addr > (u8 *) jprobe_return) && - (addr < (u8 *) jprobe_return_end)) { - if (stack_addr(regs) != kcb->jprobe_saved_sp) { - struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; - printk(KERN_ERR - "current sp %p does not match saved sp %p\n", - stack_addr(regs), kcb->jprobe_saved_sp); - printk(KERN_ERR "Saved registers for jprobe %p\n", jp); - show_regs(saved_regs); - printk(KERN_ERR "Current registers\n"); - show_regs(regs); - BUG(); - } - *regs = kcb->jprobe_saved_regs; - memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), - kcb->jprobes_stack, - MIN_STACK_SIZE(kcb->jprobe_saved_sp)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - -int __init arch_init_kprobes(void) -{ - return arch_init_optprobes(); -} - -int __kprobes arch_trampoline_kprobe(struct kprobe *p) -{ - return 0; -} diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile new file mode 100644 index 000000000000..0d33169cc1a2 --- /dev/null +++ b/arch/x86/kernel/kprobes/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for kernel probes +# + +obj-$(CONFIG_KPROBES) += core.o +obj-$(CONFIG_OPTPROBES) += opt.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h new file mode 100644 index 000000000000..2e9d4b5af036 --- /dev/null +++ b/arch/x86/kernel/kprobes/common.h @@ -0,0 +1,113 @@ +#ifndef __X86_KERNEL_KPROBES_COMMON_H +#define __X86_KERNEL_KPROBES_COMMON_H + +/* Kprobes and Optprobes common header */ + +#ifdef CONFIG_X86_64 +#define SAVE_REGS_STRING \ + /* Skip cs, ip, orig_ax. */ \ + " subq $24, %rsp\n" \ + " pushq %rdi\n" \ + " pushq %rsi\n" \ + " pushq %rdx\n" \ + " pushq %rcx\n" \ + " pushq %rax\n" \ + " pushq %r8\n" \ + " pushq %r9\n" \ + " pushq %r10\n" \ + " pushq %r11\n" \ + " pushq %rbx\n" \ + " pushq %rbp\n" \ + " pushq %r12\n" \ + " pushq %r13\n" \ + " pushq %r14\n" \ + " pushq %r15\n" +#define RESTORE_REGS_STRING \ + " popq %r15\n" \ + " popq %r14\n" \ + " popq %r13\n" \ + " popq %r12\n" \ + " popq %rbp\n" \ + " popq %rbx\n" \ + " popq %r11\n" \ + " popq %r10\n" \ + " popq %r9\n" \ + " popq %r8\n" \ + " popq %rax\n" \ + " popq %rcx\n" \ + " popq %rdx\n" \ + " popq %rsi\n" \ + " popq %rdi\n" \ + /* Skip orig_ax, ip, cs */ \ + " addq $24, %rsp\n" +#else +#define SAVE_REGS_STRING \ + /* Skip cs, ip, orig_ax and gs. */ \ + " subl $16, %esp\n" \ + " pushl %fs\n" \ + " pushl %es\n" \ + " pushl %ds\n" \ + " pushl %eax\n" \ + " pushl %ebp\n" \ + " pushl %edi\n" \ + " pushl %esi\n" \ + " pushl %edx\n" \ + " pushl %ecx\n" \ + " pushl %ebx\n" +#define RESTORE_REGS_STRING \ + " popl %ebx\n" \ + " popl %ecx\n" \ + " popl %edx\n" \ + " popl %esi\n" \ + " popl %edi\n" \ + " popl %ebp\n" \ + " popl %eax\n" \ + /* Skip ds, es, fs, gs, orig_ax, and ip. 
Note: don't pop cs here*/\ + " addl $24, %esp\n" +#endif + +/* Ensure if the instruction can be boostable */ +extern int can_boost(kprobe_opcode_t *instruction); +/* Recover instruction if given address is probed */ +extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, + unsigned long addr); +/* + * Copy an instruction and adjust the displacement if the instruction + * uses the %rip-relative addressing mode. + */ +extern int __copy_instruction(u8 *dest, u8 *src); + +/* Generate a relative-jump/call instruction */ +extern void synthesize_reljump(void *from, void *to); +extern void synthesize_relcall(void *from, void *to); + +#ifdef CONFIG_OPTPROBES +extern int arch_init_optprobes(void); +extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); +extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); +#else /* !CONFIG_OPTPROBES */ +static inline int arch_init_optprobes(void) +{ + return 0; +} +static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) +{ + return 0; +} +static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) +{ + return addr; +} +#endif + +#ifdef CONFIG_KPROBES_ON_FTRACE +extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb); +#else +static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + return 0; +} +#endif +#endif diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c new file mode 100644 index 000000000000..e124554598ee --- /dev/null +++ b/arch/x86/kernel/kprobes/core.c @@ -0,0 +1,1064 @@ +/* + * Kernel Probes (KProbes) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004 + * + * 2002-Oct Created by Vamsi Krishna S Kernel + * Probes initial implementation ( includes contributions from + * Rusty Russell). + * 2004-July Suparna Bhattacharya added jumper probes + * interface to access function arguments. + * 2004-Oct Jim Keniston and Prasanna S Panchamukhi + * adapted for x86_64 from i386. + * 2005-Mar Roland McGrath + * Fixed to handle %rip-relative addressing mode correctly. + * 2005-May Hien Nguyen , Jim Keniston + * and Prasanna S Panchamukhi + * added function-return probes. + * 2005-May Rusty Lynch + * Added function return probes functionality + * 2006-Feb Masami Hiramatsu added + * kprobe-booster and kretprobe-booster for i386. + * 2007-Dec Masami Hiramatsu added kprobe-booster + * and kretprobe-booster for x86-64 + * 2007-Dec Masami Hiramatsu , Arjan van de Ven + * and Jim Keniston + * unified x86 kprobes code. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +void jprobe_return_end(void); + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs)) + +#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ + (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ + (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \ + (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \ + (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \ + << (row % 32)) + /* + * Undefined/reserved opcodes, conditional jump, Opcode Extension + * Groups, and some special opcodes can not boost. + * This is non-const and volatile to keep gcc from statically + * optimizing it out, as variable_test_bit makes gcc think only + * *(unsigned long*) is used. + */ +static volatile u32 twobyte_is_boostable[256 / 32] = { + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ + /* ---------------------------------------------- */ + W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ + W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */ + W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ + W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ + W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ + W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ + W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */ + W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ + W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */ + W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ + W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */ + W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */ + W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ + W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */ + W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */ + W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */ + /* ----------------------------------------------- */ + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ +}; +#undef W + +struct kretprobe_blackpoint kretprobe_blacklist[] = { + {"__switch_to", }, /* This function switches only current task, but + doesn't switch kernel stack.*/ + {NULL, NULL} /* Terminator */ +}; + +const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); + +static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) +{ + struct __arch_relative_insn { + u8 op; + s32 raddr; + } __packed *insn; + + insn = (struct __arch_relative_insn *)from; + insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); + insn->op = op; +} + +/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ +void __kprobes synthesize_reljump(void *from, void *to) +{ + __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); +} + +/* Insert a call instruction at address 'from', which calls address 'to'.*/ +void __kprobes synthesize_relcall(void *from, void *to) +{ + __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); +} + +/* + * Skip the prefixes of the instruction. 
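+ * (All legacy prefixes are skipped and, on x86-64, at most one REX
+ * byte that follows them.)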
+ */ +static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) +{ + insn_attr_t attr; + + attr = inat_get_opcode_attribute((insn_byte_t)*insn); + while (inat_is_legacy_prefix(attr)) { + insn++; + attr = inat_get_opcode_attribute((insn_byte_t)*insn); + } +#ifdef CONFIG_X86_64 + if (inat_is_rex_prefix(attr)) + insn++; +#endif + return insn; +} + +/* + * Returns non-zero if opcode is boostable. + * RIP relative instructions are adjusted at copying time in 64 bits mode + */ +int __kprobes can_boost(kprobe_opcode_t *opcodes) +{ + kprobe_opcode_t opcode; + kprobe_opcode_t *orig_opcodes = opcodes; + + if (search_exception_tables((unsigned long)opcodes)) + return 0; /* Page fault may occur on this address. */ + +retry: + if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) + return 0; + opcode = *(opcodes++); + + /* 2nd-byte opcode */ + if (opcode == 0x0f) { + if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) + return 0; + return test_bit(*opcodes, + (unsigned long *)twobyte_is_boostable); + } + + switch (opcode & 0xf0) { +#ifdef CONFIG_X86_64 + case 0x40: + goto retry; /* REX prefix is boostable */ +#endif + case 0x60: + if (0x63 < opcode && opcode < 0x67) + goto retry; /* prefixes */ + /* can't boost Address-size override and bound */ + return (opcode != 0x62 && opcode != 0x67); + case 0x70: + return 0; /* can't boost conditional jump */ + case 0xc0: + /* can't boost software-interruptions */ + return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; + case 0xd0: + /* can boost AA* and XLAT */ + return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); + case 0xe0: + /* can boost in/out and absolute jmps */ + return ((opcode & 0x04) || opcode == 0xea); + case 0xf0: + if ((opcode & 0x0c) == 0 && opcode != 0xf1) + goto retry; /* lock/rep(ne) prefix */ + /* clear and set flags are boostable */ + return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); + default: + /* segment override prefixes are boostable */ + if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e) + goto retry; /* prefixes */ + /* CS override prefix and call are not boostable */ + return (opcode != 0x2e && opcode != 0x9a); + } +} + +static unsigned long +__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) +{ + struct kprobe *kp; + + kp = get_kprobe((void *)addr); + /* There is no probe, return original address */ + if (!kp) + return addr; + + /* + * Basically, kp->ainsn.insn has an original instruction. + * However, RIP-relative instruction can not do single-stepping + * at different place, __copy_instruction() tweaks the displacement of + * that instruction. In that case, we can't recover the instruction + * from the kp->ainsn.insn. + * + * On the other hand, kp->opcode has a copy of the first byte of + * the probed instruction, which is overwritten by int3. And + * the instruction at kp->addr is not modified by kprobes except + * for the first byte, we can recover the original instruction + * from it and kp->opcode. + */ + memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + buf[0] = kp->opcode; + return (unsigned long)buf; +} + +/* + * Recover the probed instruction at addr for further analysis. + * Caller must lock kprobes by kprobe_mutex, or disable preemption + * for preventing to release referencing kprobes. 
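+ *
+ * Roughly, the layering is:
+ *   recover_probed_instruction()
+ *     -> __recover_optprobed_insn()  (undo a jump-optimized probe)
+ *     -> __recover_probed_insn()     (undo a plain int3 probe)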
+ */ +unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) +{ + unsigned long __addr; + + __addr = __recover_optprobed_insn(buf, addr); + if (__addr != addr) + return __addr; + + return __recover_probed_insn(buf, addr); +} + +/* Check if paddr is at an instruction boundary */ +static int __kprobes can_probe(unsigned long paddr) +{ + unsigned long addr, __addr, offset = 0; + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; + + if (!kallsyms_lookup_size_offset(paddr, NULL, &offset)) + return 0; + + /* Decode instructions */ + addr = paddr - offset; + while (addr < paddr) { + /* + * Check if the instruction has been modified by another + * kprobe, in which case we replace the breakpoint by the + * original instruction in our buffer. + * Also, jump optimization will change the breakpoint to + * relative-jump. Since the relative-jump itself is + * normally used, we just go through if there is no kprobe. + */ + __addr = recover_probed_instruction(buf, addr); + kernel_insn_init(&insn, (void *)__addr); + insn_get_length(&insn); + + /* + * Another debugging subsystem might insert this breakpoint. + * In that case, we can't recover it. + */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; + addr += insn.length; + } + + return (addr == paddr); +} + +/* + * Returns non-zero if opcode modifies the interrupt flag. + */ +static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) +{ + /* Skip prefixes */ + insn = skip_prefixes(insn); + + switch (*insn) { + case 0xfa: /* cli */ + case 0xfb: /* sti */ + case 0xcf: /* iret/iretd */ + case 0x9d: /* popf/popfd */ + return 1; + } + + return 0; +} + +/* + * Copy an instruction and adjust the displacement if the instruction + * uses the %rip-relative addressing mode. + * If it does, Return the address of the 32-bit displacement word. + * If not, return null. + * Only applicable to 64-bit x86. + */ +int __kprobes __copy_instruction(u8 *dest, u8 *src) +{ + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; + + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); + insn_get_length(&insn); + /* Another subsystem puts a breakpoint, failed to recover */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; + memcpy(dest, insn.kaddr, insn.length); + +#ifdef CONFIG_X86_64 + if (insn_rip_relative(&insn)) { + s64 newdisp; + u8 *disp; + kernel_insn_init(&insn, dest); + insn_get_displacement(&insn); + /* + * The copied instruction uses the %rip-relative addressing + * mode. Adjust the displacement for the difference between + * the original location of this instruction and the location + * of the copy that will actually be run. The tricky bit here + * is making sure that the sign extension happens correctly in + * this calculation, since we need a signed 32-bit result to + * be sign-extended to 64 bits when it's added to the %rip + * value and yield the same 64-bit result that the sign- + * extension of the original signed 32-bit displacement would + * have given. + */ + newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; + BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. 
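+					 * e.g., with hypothetical numbers: if the copy
+					 * sits 0x1000 bytes above the original insn and
+					 * the original disp was +0x100, newdisp is
+					 * 0x100 - 0x1000 = -0xf00, which still fits;
+					 * the BUG fires only when the result no longer
+					 * fits in an s32.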
*/ + disp = (u8 *) dest + insn_offset_displacement(&insn); + *(s32 *) disp = (s32) newdisp; + } +#endif + return insn.length; +} + +static void __kprobes arch_copy_kprobe(struct kprobe *p) +{ + /* Copy an instruction with recovering if other optprobe modifies it.*/ + __copy_instruction(p->ainsn.insn, p->addr); + + /* + * __copy_instruction can modify the displacement of the instruction, + * but it doesn't affect boostable check. + */ + if (can_boost(p->ainsn.insn)) + p->ainsn.boostable = 0; + else + p->ainsn.boostable = -1; + + /* Also, displacement change doesn't affect the first byte */ + p->opcode = p->ainsn.insn[0]; +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + if (alternatives_text_reserved(p->addr, p->addr)) + return -EINVAL; + + if (!can_probe((unsigned long)p->addr)) + return -EILSEQ; + /* insn: must be on special executable page on x86. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) + return -ENOMEM; + arch_copy_kprobe(p); + return 0; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + text_poke(p->addr, &p->opcode, 1); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); + p->ainsn.insn = NULL; + } +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; + kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags; + kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; + kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; +} + +static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, p); + kcb->kprobe_saved_flags = kcb->kprobe_old_flags + = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); + if (is_IF_modifier(p->ainsn.insn)) + kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; +} + +static void __kprobes clear_btf(void) +{ + if (test_thread_flag(TIF_BLOCKSTEP)) { + unsigned long debugctl = get_debugctlmsr(); + + debugctl &= ~DEBUGCTLMSR_BTF; + update_debugctlmsr(debugctl); + } +} + +static void __kprobes restore_btf(void) +{ + if (test_thread_flag(TIF_BLOCKSTEP)) { + unsigned long debugctl = get_debugctlmsr(); + + debugctl |= DEBUGCTLMSR_BTF; + update_debugctlmsr(debugctl); + } +} + +void __kprobes +arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) +{ + unsigned long *sara = stack_addr(regs); + + ri->ret_addr = (kprobe_opcode_t *) *sara; + + /* Replace the return addr with trampoline addr */ + *sara = (unsigned long) &kretprobe_trampoline; +} + +static void __kprobes +setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) +{ + if (setup_detour_execution(p, regs, reenter)) + return; + +#if !defined(CONFIG_PREEMPT) + if (p->ainsn.boostable == 1 && !p->post_handler) { + /* Boost up -- we can execute copied instructions directly */ + if (!reenter) + reset_current_kprobe(); + /* + * Reentering boosted probe doesn't reset current_kprobe, + * nor set current_kprobe, because it doesn't use single + * stepping. 
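+		 * A boosted probe simply runs the out-of-line copy and
+		 * returns through the reljump that resume_execution()
+		 * appended after it on an earlier hit, so no debug trap
+		 * is taken at all.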
+ */ + regs->ip = (unsigned long)p->ainsn.insn; + preempt_enable_no_resched(); + return; + } +#endif + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_REENTER; + } else + kcb->kprobe_status = KPROBE_HIT_SS; + /* Prepare real single stepping */ + clear_btf(); + regs->flags |= X86_EFLAGS_TF; + regs->flags &= ~X86_EFLAGS_IF; + /* single step inline if the instruction is an int3 */ + if (p->opcode == BREAKPOINT_INSTRUCTION) + regs->ip = (unsigned long)p->addr; + else + regs->ip = (unsigned long)p->ainsn.insn; +} + +/* + * We have reentered the kprobe_handler(), since another probe was hit while + * within the handler. We save the original kprobes variables and just single + * step on the instruction of the new probe without calling any user handlers. + */ +static int __kprobes +reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + /* A probe has been hit in the codepath leading up to, or just + * after, single-stepping of a probed instruction. This entire + * codepath should strictly reside in .kprobes.text section. + * Raise a BUG or we'll continue in an endless reentering loop + * and eventually a stack overflow. + */ + printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", + p->addr); + dump_kprobe(p); + BUG(); + default: + /* impossible cases */ + WARN_ON(1); + return 0; + } + + return 1; +} + +/* + * Interrupts are disabled on entry as trap3 is an interrupt gate and they + * remain disabled throughout this function. + */ +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + kprobe_opcode_t *addr; + struct kprobe *p; + struct kprobe_ctlblk *kcb; + + addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); + /* + * We don't want to be preempted for the entire + * duration of kprobe processing. We conditionally + * re-enable preemption at the end of this function, + * and also in reenter_kprobe() and setup_singlestep(). + */ + preempt_disable(); + + kcb = get_kprobe_ctlblk(); + p = get_kprobe(addr); + + if (p) { + if (kprobe_running()) { + if (reenter_kprobe(p, regs, kcb)) + return 1; + } else { + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it prepped + * for calling the break_handler below on re-entry + * for jprobe processing, so get out doing nothing + * more here. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + return 1; + } + } else if (*addr != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + * Back up over the (now missing) int3 and run + * the original instruction. 
+ */ + regs->ip = (unsigned long)addr; + preempt_enable_no_resched(); + return 1; + } else if (kprobe_running()) { + p = __this_cpu_read(current_kprobe); + if (p->break_handler && p->break_handler(p, regs)) { + if (!skip_singlestep(p, regs, kcb)) + setup_singlestep(p, regs, kcb, 0); + return 1; + } + } /* else: not a kprobe fault; let the kernel handle it */ + + preempt_enable_no_resched(); + return 0; +} + +/* + * When a retprobed function returns, this code saves registers and + * calls trampoline_handler() runs, which calls the kretprobe's handler. + */ +static void __used __kprobes kretprobe_trampoline_holder(void) +{ + asm volatile ( + ".global kretprobe_trampoline\n" + "kretprobe_trampoline: \n" +#ifdef CONFIG_X86_64 + /* We don't bother saving the ss register */ + " pushq %rsp\n" + " pushfq\n" + SAVE_REGS_STRING + " movq %rsp, %rdi\n" + " call trampoline_handler\n" + /* Replace saved sp with true return address. */ + " movq %rax, 152(%rsp)\n" + RESTORE_REGS_STRING + " popfq\n" +#else + " pushf\n" + SAVE_REGS_STRING + " movl %esp, %eax\n" + " call trampoline_handler\n" + /* Move flags to cs */ + " movl 56(%esp), %edx\n" + " movl %edx, 52(%esp)\n" + /* Replace saved flags with true return address. */ + " movl %eax, 56(%esp)\n" + RESTORE_REGS_STRING + " popf\n" +#endif + " ret\n"); +} + +/* + * Called from kretprobe_trampoline + */ +static __used __kprobes void *trampoline_handler(struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *node, *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + kprobe_opcode_t *correct_ret_addr = NULL; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + /* fixup registers */ +#ifdef CONFIG_X86_64 + regs->cs = __KERNEL_CS; +#else + regs->cs = __KERNEL_CS | get_kernel_rpl(); + regs->gs = 0; +#endif + regs->ip = trampoline_address; + regs->orig_ax = ~0UL; + + /* + * It is possible to have multiple instances associated with a given + * task either because multiple functions in the call path have + * return probes installed on them, and/or more than one + * return probe was registered for a target function. + * + * We can handle this because: + * - instances are always pushed into the head of the list + * - when multiple return probes are registered for the same + * function, the (chronologically) first instance's ret_addr + * will be the real return address, and all the rest will + * point to kretprobe_trampoline. + */ + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + orig_ret_address = (unsigned long)ri->ret_addr; + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. 
Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + + correct_ret_addr = ri->ret_addr; + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + orig_ret_address = (unsigned long)ri->ret_addr; + if (ri->rp && ri->rp->handler) { + __this_cpu_write(current_kprobe, &ri->rp->kp); + get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); + __this_cpu_write(current_kprobe, NULL); + } + + recycle_rp_inst(ri, &empty_rp); + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_hash_unlock(current, &flags); + + hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + return (void *)orig_ret_address; +} + +/* + * Called after single-stepping. p->addr is the address of the + * instruction whose first byte has been replaced by the "int 3" + * instruction. To avoid the SMP problems that can occur when we + * temporarily put back the original opcode to single-step, we + * single-stepped a copy of the instruction. The address of this + * copy is p->ainsn.insn. + * + * This function prepares to return from the post-single-step + * interrupt. We have to fix up the stack as follows: + * + * 0) Except in the case of absolute or indirect jump or call instructions, + * the new ip is relative to the copied instruction. We need to make + * it relative to the original instruction. + * + * 1) If the single-stepped instruction was pushfl, then the TF and IF + * flags are set in the just-pushed flags, and may need to be cleared. + * + * 2) If the single-stepped instruction was a call, the return address + * that is atop the stack is the address following the copied instruction. + * We need to make it the address following the original instruction. + * + * If this is the first time we've single-stepped the instruction at + * this probepoint, and the instruction is boostable, boost it: add a + * jump instruction after the copied instruction, that jumps to the next + * instruction after the probepoint. + */ +static void __kprobes +resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +{ + unsigned long *tos = stack_addr(regs); + unsigned long copy_ip = (unsigned long)p->ainsn.insn; + unsigned long orig_ip = (unsigned long)p->addr; + kprobe_opcode_t *insn = p->ainsn.insn; + + /* Skip prefixes */ + insn = skip_prefixes(insn); + + regs->flags &= ~X86_EFLAGS_TF; + switch (*insn) { + case 0x9c: /* pushfl */ + *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF); + *tos |= kcb->kprobe_old_flags; + break; + case 0xc2: /* iret/ret/lret */ + case 0xc3: + case 0xca: + case 0xcb: + case 0xcf: + case 0xea: /* jmp absolute -- ip is correct */ + /* ip is already adjusted, no more changes required */ + p->ainsn.boostable = 1; + goto no_change; + case 0xe8: /* call relative - Fix return addr */ + *tos = orig_ip + (*tos - copy_ip); + break; +#ifdef CONFIG_X86_32 + case 0x9a: /* call absolute -- same as call absolute, indirect */ + *tos = orig_ip + (*tos - copy_ip); + goto no_change; +#endif + case 0xff: + if ((insn[1] & 0x30) == 0x10) { + /* + * call absolute, indirect + * Fix return addr; ip is correct. 
+ * But this is not boostable + */ + *tos = orig_ip + (*tos - copy_ip); + goto no_change; + } else if (((insn[1] & 0x31) == 0x20) || + ((insn[1] & 0x31) == 0x21)) { + /* + * jmp near and far, absolute indirect + * ip is correct. And this is boostable + */ + p->ainsn.boostable = 1; + goto no_change; + } + default: + break; + } + + if (p->ainsn.boostable == 0) { + if ((regs->ip > copy_ip) && + (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) { + /* + * These instructions can be executed directly if it + * jumps back to correct address. + */ + synthesize_reljump((void *)regs->ip, + (void *)orig_ip + (regs->ip - copy_ip)); + p->ainsn.boostable = 1; + } else { + p->ainsn.boostable = -1; + } + } + + regs->ip += orig_ip - copy_ip; + +no_change: + restore_btf(); +} + +/* + * Interrupts are disabled on entry as trap1 is an interrupt gate and they + * remain disabled throughout this function. + */ +static int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + resume_execution(cur, regs, kcb); + regs->flags |= kcb->kprobe_saved_flags; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + /* Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + /* + * if somebody else is singlestepping across a probe point, flags + * will have TF set, in which case, continue the remaining processing + * of do_debug, as if this is not a probe hit. + */ + if (regs->flags & X86_EFLAGS_TF) + return 0; + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the ip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->ip = (unsigned long)cur->addr; + regs->flags |= kcb->kprobe_old_flags; + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accounting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + + /* + * fixup routine could not handle it, + * Let do_page_fault() fix it. + */ + break; + default: + break; + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. 
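+ * Dispatches DIE_INT3 to kprobe_handler(), DIE_DEBUG to
+ * post_kprobe_handler() and DIE_GPF to kprobe_fault_handler().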
+ */ +int __kprobes +kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) +{ + struct die_args *args = data; + int ret = NOTIFY_DONE; + + if (args->regs && user_mode_vm(args->regs)) + return ret; + + switch (val) { + case DIE_INT3: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_DEBUG: + if (post_kprobe_handler(args->regs)) { + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; + ret = NOTIFY_STOP; + } + break; + case DIE_GPF: + /* + * To be potentially processing a kprobe fault and to + * trust the result from kprobe_running(), we have + * be non-preemptible. + */ + if (!preemptible() && kprobe_running() && + kprobe_fault_handler(args->regs, args->trapnr)) + ret = NOTIFY_STOP; + break; + default: + break; + } + return ret; +} + +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct jprobe *jp = container_of(p, struct jprobe, kp); + unsigned long addr; + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + kcb->jprobe_saved_regs = *regs; + kcb->jprobe_saved_sp = stack_addr(regs); + addr = (unsigned long)(kcb->jprobe_saved_sp); + + /* + * As Linus pointed out, gcc assumes that the callee + * owns the argument space and could overwrite it, e.g. + * tailcall optimization. So, to be absolutely safe + * we also save and restore enough stack bytes to cover + * the argument area. + */ + memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, + MIN_STACK_SIZE(addr)); + regs->flags &= ~X86_EFLAGS_IF; + trace_hardirqs_off(); + regs->ip = (unsigned long)(jp->entry); + return 1; +} + +void __kprobes jprobe_return(void) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + asm volatile ( +#ifdef CONFIG_X86_64 + " xchg %%rbx,%%rsp \n" +#else + " xchgl %%ebx,%%esp \n" +#endif + " int3 \n" + " .globl jprobe_return_end\n" + " jprobe_return_end: \n" + " nop \n"::"b" + (kcb->jprobe_saved_sp):"memory"); +} + +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + u8 *addr = (u8 *) (regs->ip - 1); + struct jprobe *jp = container_of(p, struct jprobe, kp); + + if ((addr > (u8 *) jprobe_return) && + (addr < (u8 *) jprobe_return_end)) { + if (stack_addr(regs) != kcb->jprobe_saved_sp) { + struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; + printk(KERN_ERR + "current sp %p does not match saved sp %p\n", + stack_addr(regs), kcb->jprobe_saved_sp); + printk(KERN_ERR "Saved registers for jprobe %p\n", jp); + show_regs(saved_regs); + printk(KERN_ERR "Current registers\n"); + show_regs(regs); + BUG(); + } + *regs = kcb->jprobe_saved_regs; + memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), + kcb->jprobes_stack, + MIN_STACK_SIZE(kcb->jprobe_saved_sp)); + preempt_enable_no_resched(); + return 1; + } + return 0; +} + +int __init arch_init_kprobes(void) +{ + return arch_init_optprobes(); +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c new file mode 100644 index 000000000000..23ef5c556f06 --- /dev/null +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -0,0 +1,93 @@ +/* + * Dynamic Ftrace based Kprobes Optimization + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any 
later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) Hitachi Ltd., 2012 + */ +#include +#include +#include +#include +#include + +#include "common.h" + +static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + /* + * Emulate singlestep (and also recover regs->ip) + * as if there is a 5byte nop + */ + regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + __this_cpu_write(current_kprobe, NULL); + return 1; +} + +int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + if (kprobe_ftrace(p)) + return __skip_singlestep(p, regs, kcb); + else + return 0; +} + +/* Ftrace callback handler for kprobes */ +void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + unsigned long flags; + + /* Disable irq for emulating a breakpoint and avoiding preempt */ + local_irq_save(flags); + + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + goto end; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ + regs->ip = ip + sizeof(kprobe_opcode_t); + + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) + __skip_singlestep(p, regs, kcb); + /* + * If pre_handler returns !0, it sets regs->ip and + * resets current kprobe. + */ + } +end: + local_irq_restore(flags); +} + +int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c new file mode 100644 index 000000000000..76dc6f095724 --- /dev/null +++ b/arch/x86/kernel/kprobes/opt.c @@ -0,0 +1,512 @@ +/* + * Kernel Probes Jump Optimization (Optprobes) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2002, 2004 + * Copyright (C) Hitachi Ltd., 2012 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) +{ + struct optimized_kprobe *op; + struct kprobe *kp; + long offs; + int i; + + for (i = 0; i < RELATIVEJUMP_SIZE; i++) { + kp = get_kprobe((void *)addr - i); + /* This function only handles jump-optimized kprobe */ + if (kp && kprobe_optimized(kp)) { + op = container_of(kp, struct optimized_kprobe, kp); + /* If op->list is not empty, op is under optimizing */ + if (list_empty(&op->list)) + goto found; + } + } + + return addr; +found: + /* + * If the kprobe can be optimized, original bytes which can be + * overwritten by jump destination address. In this case, original + * bytes must be recovered from op->optinsn.copied_insn buffer. + */ + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + if (addr == (unsigned long)kp->addr) { + buf[0] = kp->opcode; + memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + } else { + offs = addr - (unsigned long)kp->addr - 1; + memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); + } + + return (unsigned long)buf; +} + +/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ +static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) +{ +#ifdef CONFIG_X86_64 + *addr++ = 0x48; + *addr++ = 0xbf; +#else + *addr++ = 0xb8; +#endif + *(unsigned long *)addr = val; +} + +static void __used __kprobes kprobes_optinsn_template_holder(void) +{ + asm volatile ( + ".global optprobe_template_entry\n" + "optprobe_template_entry:\n" +#ifdef CONFIG_X86_64 + /* We don't bother saving the ss register */ + " pushq %rsp\n" + " pushfq\n" + SAVE_REGS_STRING + " movq %rsp, %rsi\n" + ".global optprobe_template_val\n" + "optprobe_template_val:\n" + ASM_NOP5 + ASM_NOP5 + ".global optprobe_template_call\n" + "optprobe_template_call:\n" + ASM_NOP5 + /* Move flags to rsp */ + " movq 144(%rsp), %rdx\n" + " movq %rdx, 152(%rsp)\n" + RESTORE_REGS_STRING + /* Skip flags entry */ + " addq $8, %rsp\n" + " popfq\n" +#else /* CONFIG_X86_32 */ + " pushf\n" + SAVE_REGS_STRING + " movl %esp, %edx\n" + ".global optprobe_template_val\n" + "optprobe_template_val:\n" + ASM_NOP5 + ".global optprobe_template_call\n" + "optprobe_template_call:\n" + ASM_NOP5 + RESTORE_REGS_STRING + " addl $4, %esp\n" /* skip cs */ + " popf\n" +#endif + ".global optprobe_template_end\n" + "optprobe_template_end:\n"); +} + +#define TMPL_MOVE_IDX \ + ((long)&optprobe_template_val - (long)&optprobe_template_entry) +#define TMPL_CALL_IDX \ + ((long)&optprobe_template_call - (long)&optprobe_template_entry) +#define TMPL_END_IDX \ + ((long)&optprobe_template_end - (long)&optprobe_template_entry) + +#define INT3_SIZE sizeof(kprobe_opcode_t) + +/* Optimized kprobe call back function: called from optinsn */ +static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long flags; + + /* This is possible if op is under delayed unoptimizing */ + if (kprobe_disabled(&op->kp)) + return; + + local_irq_save(flags); + if (kprobe_running()) { + kprobes_inc_nmissed_count(&op->kp); + } else { + /* Save skipped registers */ +#ifdef CONFIG_X86_64 + regs->cs = __KERNEL_CS; +#else + regs->cs = 
__KERNEL_CS | get_kernel_rpl(); + regs->gs = 0; +#endif + regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; + regs->orig_ax = ~0UL; + + __this_cpu_write(current_kprobe, &op->kp); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + opt_pre_handler(&op->kp, regs); + __this_cpu_write(current_kprobe, NULL); + } + local_irq_restore(flags); +} + +static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) +{ + int len = 0, ret; + + while (len < RELATIVEJUMP_SIZE) { + ret = __copy_instruction(dest + len, src + len); + if (!ret || !can_boost(dest + len)) + return -EINVAL; + len += ret; + } + /* Check whether the address range is reserved */ + if (ftrace_text_reserved(src, src + len - 1) || + alternatives_text_reserved(src, src + len - 1) || + jump_label_text_reserved(src, src + len - 1)) + return -EBUSY; + + return len; +} + +/* Check whether insn is indirect jump */ +static int __kprobes insn_is_indirect_jump(struct insn *insn) +{ + return ((insn->opcode.bytes[0] == 0xff && + (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ + insn->opcode.bytes[0] == 0xea); /* Segment based jump */ +} + +/* Check whether insn jumps into specified address range */ +static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) +{ + unsigned long target = 0; + + switch (insn->opcode.bytes[0]) { + case 0xe0: /* loopne */ + case 0xe1: /* loope */ + case 0xe2: /* loop */ + case 0xe3: /* jcxz */ + case 0xe9: /* near relative jump */ + case 0xeb: /* short relative jump */ + break; + case 0x0f: + if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ + break; + return 0; + default: + if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ + break; + return 0; + } + target = (unsigned long)insn->next_byte + insn->immediate.value; + + return (start <= target && target <= start + len); +} + +/* Decode whole function to ensure any instructions don't jump into target */ +static int __kprobes can_optimize(unsigned long paddr) +{ + unsigned long addr, size = 0, offset = 0; + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; + + /* Lookup symbol including addr */ + if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) + return 0; + + /* + * Do not optimize in the entry code due to the unstable + * stack handling. + */ + if ((paddr >= (unsigned long)__entry_text_start) && + (paddr < (unsigned long)__entry_text_end)) + return 0; + + /* Check there is enough space for a relative jump. */ + if (size - offset < RELATIVEJUMP_SIZE) + return 0; + + /* Decode instructions */ + addr = paddr - offset; + while (addr < paddr - offset + size) { /* Decode until function end */ + if (search_exception_tables(addr)) + /* + * Since some fixup code will jumps into this function, + * we can't optimize kprobe in this function. + */ + return 0; + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); + insn_get_length(&insn); + /* Another subsystem puts a breakpoint */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; + /* Recover address */ + insn.kaddr = (void *)addr; + insn.next_byte = (void *)(addr + insn.length); + /* Check any instructions don't jump into target */ + if (insn_is_indirect_jump(&insn) || + insn_jump_into_range(&insn, paddr + INT3_SIZE, + RELATIVE_ADDR_SIZE)) + return 0; + addr += insn.length; + } + + return 1; +} + +/* Check optimized_kprobe can actually be optimized. 
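+ * That is, none of the instruction bytes that the relative jump will
+ * overwrite may hold another enabled kprobe.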
*/ +int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) +{ + int i; + struct kprobe *p; + + for (i = 1; i < op->optinsn.size; i++) { + p = get_kprobe(op->kp.addr + i); + if (p && !kprobe_disabled(p)) + return -EEXIST; + } + + return 0; +} + +/* Check the addr is within the optimized instructions. */ +int __kprobes +arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) +{ + return ((unsigned long)op->kp.addr <= addr && + (unsigned long)op->kp.addr + op->optinsn.size > addr); +} + +/* Free optimized instruction slot */ +static __kprobes +void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) +{ + if (op->optinsn.insn) { + free_optinsn_slot(op->optinsn.insn, dirty); + op->optinsn.insn = NULL; + op->optinsn.size = 0; + } +} + +void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) +{ + __arch_remove_optimized_kprobe(op, 1); +} + +/* + * Copy replacing target instructions + * Target instructions MUST be relocatable (checked inside) + * This is called when new aggr(opt)probe is allocated or reused. + */ +int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) +{ + u8 *buf; + int ret; + long rel; + + if (!can_optimize((unsigned long)op->kp.addr)) + return -EILSEQ; + + op->optinsn.insn = get_optinsn_slot(); + if (!op->optinsn.insn) + return -ENOMEM; + + /* + * Verify if the address gap is in 2GB range, because this uses + * a relative jump. + */ + rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; + if (abs(rel) > 0x7fffffff) + return -ERANGE; + + buf = (u8 *)op->optinsn.insn; + + /* Copy instructions into the out-of-line buffer */ + ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); + if (ret < 0) { + __arch_remove_optimized_kprobe(op, 0); + return ret; + } + op->optinsn.size = ret; + + /* Copy arch-dep-instance from template */ + memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); + + /* Set probe information */ + synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); + + /* Set probe function call */ + synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); + + /* Set returning jmp instruction at the tail of out-of-line buffer */ + synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, + (u8 *)op->kp.addr + op->optinsn.size); + + flush_icache_range((unsigned long) buf, + (unsigned long) buf + TMPL_END_IDX + + op->optinsn.size + RELATIVEJUMP_SIZE); + return 0; +} + +#define MAX_OPTIMIZE_PROBES 256 +static struct text_poke_param *jump_poke_params; +static struct jump_poke_buffer { + u8 buf[RELATIVEJUMP_SIZE]; +} *jump_poke_bufs; + +static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, + u8 *insn_buf, + struct optimized_kprobe *op) +{ + s32 rel = (s32)((long)op->optinsn.insn - + ((long)op->kp.addr + RELATIVEJUMP_SIZE)); + + /* Backup instructions which will be replaced by jump address */ + memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, + RELATIVE_ADDR_SIZE); + + insn_buf[0] = RELATIVEJUMP_OPCODE; + *(s32 *)(&insn_buf[1]) = rel; + + tprm->addr = op->kp.addr; + tprm->opcode = insn_buf; + tprm->len = RELATIVEJUMP_SIZE; +} + +/* + * Replace breakpoints (int3) with relative jumps. + * Caller must call with locking kprobe_mutex and text_mutex. 
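+ *
+ * Each armed probe goes from <int3><4 original bytes> to a single
+ * 5-byte <jmp rel32> into its out-of-line buffer; the displaced four
+ * bytes are preserved in op->optinsn.copied_insn.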
+ */ +void __kprobes arch_optimize_kprobes(struct list_head *oplist) +{ + struct optimized_kprobe *op, *tmp; + int c = 0; + + list_for_each_entry_safe(op, tmp, oplist, list) { + WARN_ON(kprobe_disabled(&op->kp)); + /* Setup param */ + setup_optimize_kprobe(&jump_poke_params[c], + jump_poke_bufs[c].buf, op); + list_del_init(&op->list); + if (++c >= MAX_OPTIMIZE_PROBES) + break; + } + + /* + * text_poke_smp doesn't support NMI/MCE code modifying. + * However, since kprobes itself also doesn't support NMI/MCE + * code probing, it's not a problem. + */ + text_poke_smp_batch(jump_poke_params, c); +} + +static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, + u8 *insn_buf, + struct optimized_kprobe *op) +{ + /* Set int3 to first byte for kprobes */ + insn_buf[0] = BREAKPOINT_INSTRUCTION; + memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + + tprm->addr = op->kp.addr; + tprm->opcode = insn_buf; + tprm->len = RELATIVEJUMP_SIZE; +} + +/* + * Recover original instructions and breakpoints from relative jumps. + * Caller must call with locking kprobe_mutex. + */ +extern void arch_unoptimize_kprobes(struct list_head *oplist, + struct list_head *done_list) +{ + struct optimized_kprobe *op, *tmp; + int c = 0; + + list_for_each_entry_safe(op, tmp, oplist, list) { + /* Setup param */ + setup_unoptimize_kprobe(&jump_poke_params[c], + jump_poke_bufs[c].buf, op); + list_move(&op->list, done_list); + if (++c >= MAX_OPTIMIZE_PROBES) + break; + } + + /* + * text_poke_smp doesn't support NMI/MCE code modifying. + * However, since kprobes itself also doesn't support NMI/MCE + * code probing, it's not a problem. + */ + text_poke_smp_batch(jump_poke_params, c); +} + +/* Replace a relative jump with a breakpoint (int3). */ +void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) +{ + u8 buf[RELATIVEJUMP_SIZE]; + + /* Set int3 to first byte for kprobes */ + buf[0] = BREAKPOINT_INSTRUCTION; + memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); +} + +int __kprobes +setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) +{ + struct optimized_kprobe *op; + + if (p->flags & KPROBE_FLAG_OPTIMIZED) { + /* This kprobe is really able to run optimized path. */ + op = container_of(p, struct optimized_kprobe, kp); + /* Detour through copied instructions */ + regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; + if (!reenter) + reset_current_kprobe(); + preempt_enable_no_resched(); + return 1; + } + return 0; +} + +int __kprobes arch_init_optprobes(void) +{ + /* Allocate code buffer and parameter array */ + jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * + MAX_OPTIMIZE_PROBES, GFP_KERNEL); + if (!jump_poke_bufs) + return -ENOMEM; + + jump_poke_params = kmalloc(sizeof(struct text_poke_param) * + MAX_OPTIMIZE_PROBES, GFP_KERNEL); + if (!jump_poke_params) { + kfree(jump_poke_bufs); + jump_poke_bufs = NULL; + return -ENOMEM; + } + + return 0; +} -- cgit v1.2.3-59-g8ed1b From b000c8065a92b0fe0e1694f41b2c8d8ba7b7b1ec Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 18 Jan 2013 10:31:20 -0500 Subject: tracing: Remove the extra 4 bytes of padding in events Due to a userspace issue with PowerTop v2beta, which hardcoded the offset of event fields that it was using, it broke when we removed the Big Kernel Lock counter from the event header. 
(commit e6e1e2593 "tracing: Remove lock_depth from event entry") Because this broke userspace, it was determined that we must keep those 4 bytes around. (commit a3a4a5acd "Regression: partial revert "tracing: Remove lock_depth from event entry"") This unfortunately wastes space in the ring buffer. 4 bytes per event, where a lot of events are just 24 bytes. That's 16% of the buffer wasted. A million events will add 4 megs of white space into the buffer. It was later noticed that PowerTop v2beta could not work on systems where the kernel was 64 bit but the userspace was 32 bits. The reason was because the offsets are different between the two and the hard coded offset of one would not work with the other. With PowerTop v2 final, it implemented the same interface that both perf and trace-cmd use. That is, it reads the format file of the event to find the offsets of the fields it needs. This fixes the problem with running powertop on a 32 bit userspace running on a 64 bit kernel. It also no longer requires the 4 byte padding. As PowerTop v2 has been out for a while, and is included in all major distributions, it is time that we can safely remove the 4 bytes of padding. Users of PowerTop v2beta should upgrade to PowerTop v2 final. Cc: Linus Torvalds Acked-by: Arjan van de Ven Signed-off-by: Steven Rostedt --- include/linux/ftrace_event.h | 1 - kernel/trace/trace.c | 1 - kernel/trace/trace_events.c | 1 - 3 files changed, 3 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 43ef8b6cce7f..6f8d0b77006b 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -49,7 +49,6 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; - int padding; }; #define FTRACE_MAX_EVENT \ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d62248dfda76..a387bd271e71 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1173,7 +1173,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; - entry->padding = 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 880073d0b946..57e9b284250c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -116,7 +116,6 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); - __common_field(int, padding); return ret; } -- cgit v1.2.3-59-g8ed1b From 0a71e4c6d749d06f52e75a406fc9046924fcfcc1 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 22 Jan 2013 12:06:56 -0500 Subject: tracing: Remove trace.h header from trace_clock.c As trace_clock is used by other things besides tracing, and it does not require anything from trace.h, it is best not to include the header file in trace_clock.c. Signed-off-by: Steven Rostedt --- kernel/trace/trace_clock.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 394783531cbb..22b638b28e48 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -21,8 +21,6 @@ #include #include -#include "trace.h" - /* * trace_clock_local(): the simplest and least coherent tracing clock. 
 *

-- cgit v1.2.3-59-g8ed1b


From 34600f0e9c33c9cd48ae87448205f51332b7d5a0 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 22 Jan 2013 13:35:11 -0500
Subject: tracing: Fix race with max_tr and changing tracers

There's a race condition between the setting of a new tracer and the
update of the max trace buffers (the swap). When a new tracer is added,
it sets current_trace to nop_trace before disabling the old tracer. At
this moment, if the old tracer uses update_max_tr(), the update may
trigger the warning against !current_trace->use_max_tr, as nop_trace
doesn't have that set.

As update_max_tr() requires that interrupts be disabled, we can add a
check to see if current_trace == nop_trace and bail if it does. Then
when disabling the current_trace, set it to nop_trace and run
synchronize_sched(). This will make sure all calls to update_max_tr()
have completed (it was called with interrupts disabled).

As a cleanup, this commit also removes the shrinking and recreating of
the max_tr buffer when the old and new tracers both have use_max_tr
set. The old way used to always shrink the buffer and then expand it
for the next tracer, which was a waste of time.

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace.c | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a387bd271e71..d2a658349ca1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -709,10 +709,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+
+	/* If we disabled the tracer, stop now */
+	if (current_trace == &nop_trace)
 		return;
-	}
+
+	if (WARN_ON_ONCE(!current_trace->use_max_tr))
+		return;
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -3185,6 +3189,7 @@ static int tracing_set_tracer(const char *buf)
 	static struct trace_option_dentry *topts;
 	struct trace_array *tr = &global_trace;
 	struct tracer *t;
+	bool had_max_tr;
 	int ret = 0;
 
 	mutex_lock(&trace_types_lock);
@@ -3211,7 +3216,19 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-	if (current_trace && current_trace->use_max_tr) {
+
+	had_max_tr = current_trace && current_trace->use_max_tr;
+	current_trace = &nop_trace;
+
+	if (had_max_tr && !t->use_max_tr) {
+		/*
+		 * We need to make sure that update_max_tr() sees that
+		 * current_trace changed to nop_trace to keep it from
+		 * swapping the buffers after we resize it.
+		 * update_max_tr() is called with interrupts disabled,
+		 * so a synchronize_sched() is sufficient.
+		 */
+		synchronize_sched();
 		/*
		 * We don't free the ring buffer; instead, we resize it, because
		 * the max_tr ring buffer has some state (e.g.
ring->clock) and @@ -3222,10 +3239,8 @@ static int tracing_set_tracer(const char *buf) } destroy_trace_option_files(topts); - current_trace = &nop_trace; - topts = create_trace_option_files(t); - if (t->use_max_tr) { + if (t->use_max_tr && !had_max_tr) { /* we need to make per cpu buffer sizes equivalent */ ret = resize_buffer_duplicate_size(&max_tr, &global_trace, RING_BUFFER_ALL_CPUS); -- cgit v1.2.3-59-g8ed1b From 05cbbf643b8eea1be21082c53cdb856d1dc6d765 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 22 Jan 2013 23:35:11 -0500 Subject: tracing: Fix selftest function recursion accounting The test that checks function recursion does things differently if the arch does not support all ftrace features. But that really doesn't make a difference with how the test runs, and either way the count variable should be 2 at the end. Currently the test wrongly fails for archs that don't support all the ftrace features. Signed-off-by: Steven Rostedt --- kernel/trace/trace_selftest.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 6c62d58d8e87..adb008a0136f 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -452,7 +452,6 @@ trace_selftest_function_recursion(void) char *func_name; int len; int ret; - int cnt; /* The previous test PASSED */ pr_cont("PASSED\n"); @@ -510,19 +509,10 @@ trace_selftest_function_recursion(void) unregister_ftrace_function(&test_recsafe_probe); - /* - * If arch supports all ftrace features, and no other task - * was on the list, we should be fine. - */ - if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC) - cnt = 2; /* Should have recursed */ - else - cnt = 1; - ret = -1; - if (trace_selftest_recursion_cnt != cnt) { - pr_cont("*callback not called expected %d times (%d)* ", - cnt, trace_selftest_recursion_cnt); + if (trace_selftest_recursion_cnt != 2) { + pr_cont("*callback not called expected 2 times (%d)* ", + trace_selftest_recursion_cnt); goto out; } -- cgit v1.2.3-59-g8ed1b From 6350379452ccaeaa71734adf57dec2ebc9207849 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 16:58:56 -0400 Subject: ftrace: Fix global function tracers that are not recursion safe If one of the function tracers set by the global ops is not recursion safe, it can still be called directly without the added recursion supplied by the ftrace infrastructure. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6e34dc162fe1..789cbec24e81 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -221,10 +221,24 @@ static void update_global_ops(void) * registered callers. */ if (ftrace_global_list == &ftrace_list_end || - ftrace_global_list->next == &ftrace_list_end) + ftrace_global_list->next == &ftrace_list_end) { func = ftrace_global_list->func; - else + /* + * As we are calling the function directly. + * If it does not have recursion protection, + * the function_trace_op needs to be updated + * accordingly. + */ + if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) + global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; + else + global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; + } else { func = ftrace_global_list_func; + /* The list has its own recursion protection. 
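+		 * (ftrace_global_list_func brackets the walk with
+		 * trace_recursion_set/clear(TRACE_GLOBAL_BIT), so it
+		 * is always recursion safe.)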
*/ + global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; + } + /* If we filter on pids, update to use the pid function */ if (!list_empty(&ftrace_pids)) { -- cgit v1.2.3-59-g8ed1b From 9640388b63556b4cfecbb5aaf91a5c99d272f429 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 17:01:20 -0400 Subject: ftrace: Fix function tracing recursion self test The function tracing recursion self test should not crash the machine if the recursion test fails. If it detects that the function tracing is recursing when it should not be, then bail, don't go into an infinite recursive loop. Signed-off-by: Steven Rostedt --- kernel/trace/trace_selftest.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index adb008a0136f..51c819c12c29 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -415,7 +415,8 @@ static void trace_selftest_test_recursion_func(unsigned long ip, * The ftrace infrastructure should provide the recursion * protection. If not, this will crash the kernel! */ - trace_selftest_recursion_cnt++; + if (trace_selftest_recursion_cnt++ > 10) + return; DYN_FTRACE_TEST_NAME(); } -- cgit v1.2.3-59-g8ed1b From 0a016409e42f273415f8225ddf2c58eb2df88034 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 17:03:03 -0400 Subject: ftrace: Optimize the function tracer list loop There are lots of places that perform: op = rcu_dereference_raw(ftrace_control_list); while (op != &ftrace_list_end) { Add a helper macro to do this, and also optimize for a single entity. That is, gcc will optimize a loop for either no iterations or more than one iteration. But usually only a single callback is registered to the function tracer, thus the optimized case should be a single pass. To do this we now do: op = rcu_dereference_raw(list); do { [...] } while (likely(op = rcu_dereference_raw((op)->next)) && unlikely((op) != &ftrace_list_end)); An op is always registered (ftrace_list_end when no callbacks are registered), thus when a single callback is registered, the linked list looks like: top => callback => ftrace_list_end => NULL. The likely(op = op->next) still must be performed due to the race of removing the callback, where the first op assignment could equal ftrace_list_end. In that case, the op->next would be NULL. But this is unlikely (only happens in a race condition when removing the callback). But it is very likely that the next op would be ftrace_list_end, unless more than one callback has been registered. This tells gcc what the most common case is and makes the fast path have the least amount of branches. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 48 ++++++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 789cbec24e81..1330969d8447 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) #endif +/* + * Traverse the ftrace_global_list, invoking all entries. The reason that we + * can use rcu_dereference_raw() is that elements removed from this list + * are simply leaked, so there is no need to interact with a grace-period + * mechanism. The rcu_dereference_raw() calls are needed to handle + * concurrent insertions into the ftrace_global_list.
+ * + * Silly Alpha and silly pointer-speculation compiler optimizations! + */ +#define do_for_each_ftrace_op(op, list) \ + op = rcu_dereference_raw(list); \ + do + +/* + * Optimized for just a single item in the list (as that is the normal case). + */ +#define while_for_each_ftrace_op(op) \ + while (likely(op = rcu_dereference_raw((op)->next)) && \ + unlikely((op) != &ftrace_list_end)) + /** * ftrace_nr_registered_ops - return number of ops registered * @@ -132,15 +152,6 @@ int ftrace_nr_registered_ops(void) return cnt; } -/* - * Traverse the ftrace_global_list, invoking all entries. The reason that we - * can use rcu_dereference_raw() is that elements removed from this list - * are simply leaked, so there is no need to interact with a grace-period - * mechanism. The rcu_dereference_raw() calls are needed to handle - * concurrent insertions into the ftrace_global_list. - * - * Silly Alpha and silly pointer-speculation compiler optimizations! - */ static void ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *regs) @@ -149,11 +160,9 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, return; trace_recursion_set(TRACE_GLOBAL_BIT); - op = rcu_dereference_raw(ftrace_global_list); /*see above*/ - while (op != &ftrace_list_end) { + do_for_each_ftrace_op(op, ftrace_global_list) { op->func(ip, parent_ip, op, regs); - op = rcu_dereference_raw(op->next); /*see above*/ - }; + } while_for_each_ftrace_op(op); trace_recursion_clear(TRACE_GLOBAL_BIT); } @@ -4104,14 +4113,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, */ preempt_disable_notrace(); trace_recursion_set(TRACE_CONTROL_BIT); - op = rcu_dereference_raw(ftrace_control_list); - while (op != &ftrace_list_end) { + do_for_each_ftrace_op(op, ftrace_control_list) { if (!ftrace_function_local_disabled(op) && ftrace_ops_test(op, ip)) op->func(ip, parent_ip, op, regs); - - op = rcu_dereference_raw(op->next); - }; + } while_for_each_ftrace_op(op); trace_recursion_clear(TRACE_CONTROL_BIT); preempt_enable_notrace(); } @@ -4139,12 +4145,10 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, * they must be freed after a synchronize_sched(). */ preempt_disable_notrace(); - op = rcu_dereference_raw(ftrace_ops_list); - while (op != &ftrace_list_end) { + do_for_each_ftrace_op(op, ftrace_ops_list) { if (ftrace_ops_test(op, ip)) op->func(ip, parent_ip, op, regs); - op = rcu_dereference_raw(op->next); - }; + } while_for_each_ftrace_op(op); preempt_enable_notrace(); trace_recursion_clear(TRACE_INTERNAL_BIT); } -- cgit v1.2.3-59-g8ed1b From c29f122cd7fc178b72b1335b1fce0dff2e5c0f5d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 17:17:59 -0400 Subject: ftrace: Add context level recursion bit checking Currently for recursion checking in the function tracer, ftrace tests a task_struct bit to determine if the function tracer had recursed or not. If it has, then it will return without going further. But this leads to races. If an interrupt came in after the bit was set, the functions being traced would see that bit set and think that the function tracer recursed on itself, and would return. Instead add a bit for each context (normal, softirq, irq and nmi). A check of which context the task is in is made before testing the associated bit. Now if an interrupt preempts the function tracer after the previous context has been set, the interrupt functions can still be traced.
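To make the per-context scheme concrete, here is a minimal, self-contained sketch of the idea; this is illustration only, not the kernel code itself (the helper names ctx_bit() and ctx_try_lock() are invented, and the in_nmi()/in_irq()/in_interrupt() predicates are assumed from <linux/hardirq.h>; the kernel's actual version follows in the diff below):

#include <linux/hardirq.h>	/* in_nmi(), in_irq(), in_interrupt() -- assumed */

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

/* Pick the recursion bit that belongs to the current context. */
static inline int ctx_bit(void)
{
	if (in_interrupt()) {
		if (in_nmi())
			return CTX_NMI;
		if (in_irq())
			return CTX_IRQ;
		return CTX_SOFTIRQ;
	}
	return CTX_NORMAL;
}

/* Returns 0 on success, -1 only when *this* context is already tracing,
 * so an interrupt that preempts a traced function is still traced. */
static inline int ctx_try_lock(unsigned long *flags)
{
	int bit = ctx_bit();

	if (*flags & (1UL << bit))
		return -1;
	*flags |= 1UL << bit;
	return 0;
}

With a single flag, the interrupt would see the preempted task's bit and wrongly bail; with one bit per context, only true same-context reentry is rejected.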
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 40 +++++++++++++++++++++++++++++++++------- kernel/trace/trace.h | 12 +++++++++--- 2 files changed, 42 insertions(+), 10 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1330969d8447..639b6ab1f04c 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -156,14 +156,27 @@ static void ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *regs) { - if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) + int bit; + + if (in_interrupt()) { + if (in_nmi()) + bit = TRACE_GLOBAL_NMI_BIT; + + else if (in_irq()) + bit = TRACE_GLOBAL_IRQ_BIT; + else + bit = TRACE_GLOBAL_SIRQ_BIT; + } else + bit = TRACE_GLOBAL_BIT; + + if (unlikely(trace_recursion_test(bit))) return; - trace_recursion_set(TRACE_GLOBAL_BIT); + trace_recursion_set(bit); do_for_each_ftrace_op(op, ftrace_global_list) { op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); - trace_recursion_clear(TRACE_GLOBAL_BIT); + trace_recursion_clear(bit); } static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, @@ -4132,14 +4145,27 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ignored, struct pt_regs *regs) { struct ftrace_ops *op; + unsigned int bit; if (function_trace_stop) return; - if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) - return; + if (in_interrupt()) { + if (in_nmi()) + bit = TRACE_INTERNAL_NMI_BIT; + + else if (in_irq()) + bit = TRACE_INTERNAL_IRQ_BIT; + else + bit = TRACE_INTERNAL_SIRQ_BIT; + } else + bit = TRACE_INTERNAL_BIT; + + if (unlikely(trace_recursion_test(bit))) + return; + + trace_recursion_set(bit); - trace_recursion_set(TRACE_INTERNAL_BIT); /* * Some of the ops may be dynamically allocated, * they must be freed after a synchronize_sched(). @@ -4150,7 +4176,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); preempt_enable_notrace(); - trace_recursion_clear(TRACE_INTERNAL_BIT); + trace_recursion_clear(bit); } /* diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index c75d7988902c..fe6ccff9cc70 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -299,8 +299,14 @@ struct tracer { /* for function tracing recursion */ #define TRACE_INTERNAL_BIT (1<<11) -#define TRACE_GLOBAL_BIT (1<<12) -#define TRACE_CONTROL_BIT (1<<13) +#define TRACE_INTERNAL_NMI_BIT (1<<12) +#define TRACE_INTERNAL_IRQ_BIT (1<<13) +#define TRACE_INTERNAL_SIRQ_BIT (1<<14) +#define TRACE_GLOBAL_BIT (1<<15) +#define TRACE_GLOBAL_NMI_BIT (1<<16) +#define TRACE_GLOBAL_IRQ_BIT (1<<17) +#define TRACE_GLOBAL_SIRQ_BIT (1<<18) +#define TRACE_CONTROL_BIT (1<<19) /* * Abuse of the trace_recursion. @@ -309,7 +315,7 @@ struct tracer { * was called in irq context but we have irq tracing off. Since this * can only be modified by current, we can reuse trace_recursion. */ -#define TRACE_IRQ_BIT (1<<13) +#define TRACE_IRQ_BIT (1<<20) #define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0) #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0) -- cgit v1.2.3-59-g8ed1b From e46cbf75c621725964fe1f6e7013e8bcd86a0e3d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 17:32:25 -0400 Subject: tracing: Make the trace recursion bits into enums Convert the bits into enums which makes the code a little easier to maintain. 
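As a hedged aside on why positions beat masks (the EXAMPLE_* names below are hypothetical, not the kernel's): with an enum the compiler hands out consecutive values, so inserting a bit renumbers everything after it automatically, whereas #define'd masks must each be edited by hand.

enum {
	EXAMPLE_FIRST_BIT = 11,	/* explicit start, as with TRACE_INTERNAL_BIT */
	EXAMPLE_NEW_BIT,	/* added later: the bits below just shift up   */
	EXAMPLE_LAST_BIT,
};

/* The set/clear/test helpers then shift at use-time instead of
 * storing masks, matching the macros in the hunk below: */
#define example_set(flags, bit)		((flags) |=  (1UL << (bit)))
#define example_clear(flags, bit)	((flags) &= ~(1UL << (bit)))
#define example_test(flags, bit)	((flags) &   (1UL << (bit)))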
Signed-off-by: Steven Rostedt --- kernel/trace/trace.h | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index fe6ccff9cc70..5a095d6f088d 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -298,15 +298,18 @@ struct tracer { #define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff) /* for function tracing recursion */ -#define TRACE_INTERNAL_BIT (1<<11) -#define TRACE_INTERNAL_NMI_BIT (1<<12) -#define TRACE_INTERNAL_IRQ_BIT (1<<13) -#define TRACE_INTERNAL_SIRQ_BIT (1<<14) -#define TRACE_GLOBAL_BIT (1<<15) -#define TRACE_GLOBAL_NMI_BIT (1<<16) -#define TRACE_GLOBAL_IRQ_BIT (1<<17) -#define TRACE_GLOBAL_SIRQ_BIT (1<<18) -#define TRACE_CONTROL_BIT (1<<19) +enum { + TRACE_INTERNAL_BIT = 11, + TRACE_INTERNAL_NMI_BIT, + TRACE_INTERNAL_IRQ_BIT, + TRACE_INTERNAL_SIRQ_BIT, + + TRACE_GLOBAL_BIT, + TRACE_GLOBAL_NMI_BIT, + TRACE_GLOBAL_IRQ_BIT, + TRACE_GLOBAL_SIRQ_BIT, + + TRACE_CONTROL_BIT, /* * Abuse of the trace_recursion. @@ -315,11 +318,12 @@ struct tracer { * was called in irq context but we have irq tracing off. Since this * can only be modified by current, we can reuse trace_recursion. */ -#define TRACE_IRQ_BIT (1<<20) + TRACE_IRQ_BIT, +}; -#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0) -#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0) -#define trace_recursion_test(bit) ((current)->trace_recursion & (bit)) +#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) +#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) +#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) #define TRACE_PIPE_ALL_CPU -1 -- cgit v1.2.3-59-g8ed1b From edc15cafcbfa3d73f819cae99885a2e35e4cbce5 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 17:47:21 -0400 Subject: tracing: Avoid unnecessary multiple recursion checks When function tracing occurs, the following steps are made: If arch does not support a ftrace feature: call internal function (uses INTERNAL bits) which calls... If callback is registered to the "global" list, the list function is called and recursion checks the GLOBAL bits. then this function calls... The function callback, which can use the FTRACE bits to check for recursion. Now if the arch does not support a feature, and it calls the global list function which calls the ftrace callback all three of these steps will do a recursion protection. There's no reason to do one if the previous caller already did. The recursion that we are protecting against will go through the same steps again. To prevent the multiple recursion checks, if a recursion bit is set that is higher than the MAX bit of the current check, then we know that the check was made by the previous caller, and we can skip the current check.
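The skip test can be sketched as follows; this is a hedged rendering with assumed names (mask_all standing in for TRACE_CONTEXT_MASK), not the real helpers, which appear as trace_test_and_set_recursion()/trace_clear_recursion() in the trace.h hunk below. Each caller class owns four consecutive context bits, ordered so an outer caller's bits are numerically higher than an inner one's:

/* Sketch only: returns the bit that was set, 0 if an outer check
 * already ran (nothing to clear), or -1 on genuine recursion. */
static inline int test_and_set_recursion(unsigned long *val,
					 int start, int max,
					 unsigned long mask_all)
{
	int bit;

	/* A bit above this caller's range is set: the outer caller
	 * already passed its recursion check, so skip this one. */
	if ((*val & mask_all) > (unsigned long)max)
		return 0;

	bit = ctx_bit() + start;	/* ctx_bit() as sketched earlier */
	if (*val & (1UL << bit))
		return -1;

	*val |= 1UL << bit;
	return bit;			/* hand back to the clear side */
}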
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 40 +++++-------------- kernel/trace/trace.h | 106 +++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 110 insertions(+), 36 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 639b6ab1f04c..ce8c3d68292f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -158,25 +158,15 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, { int bit; - if (in_interrupt()) { - if (in_nmi()) - bit = TRACE_GLOBAL_NMI_BIT; - - else if (in_irq()) - bit = TRACE_GLOBAL_IRQ_BIT; - else - bit = TRACE_GLOBAL_SIRQ_BIT; - } else - bit = TRACE_GLOBAL_BIT; - - if (unlikely(trace_recursion_test(bit))) + bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX); + if (bit < 0) return; - trace_recursion_set(bit); do_for_each_ftrace_op(op, ftrace_global_list) { op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); - trace_recursion_clear(bit); + + trace_clear_recursion(bit); } static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, @@ -4145,26 +4135,14 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ignored, struct pt_regs *regs) { struct ftrace_ops *op; - unsigned int bit; + int bit; if (function_trace_stop) return; - if (in_interrupt()) { - if (in_nmi()) - bit = TRACE_INTERNAL_NMI_BIT; - - else if (in_irq()) - bit = TRACE_INTERNAL_IRQ_BIT; - else - bit = TRACE_INTERNAL_SIRQ_BIT; - } else - bit = TRACE_INTERNAL_BIT; - - if (unlikely(trace_recursion_test(bit))) - return; - - trace_recursion_set(bit); + bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); + if (bit < 0) + return; /* * Some of the ops may be dynamically allocated, @@ -4176,7 +4154,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); preempt_enable_notrace(); - trace_recursion_clear(bit); + trace_clear_recursion(bit); } /* diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5a095d6f088d..c203a51dd412 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -297,18 +297,49 @@ struct tracer { /* Ring buffer has the 10 LSB bits to count */ #define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff) -/* for function tracing recursion */ +/* + * For function tracing recursion: + * The order of these bits are important. + * + * When function tracing occurs, the following steps are made: + * If arch does not support a ftrace feature: + * call internal function (uses INTERNAL bits) which calls... + * If callback is registered to the "global" list, the list + * function is called and recursion checks the GLOBAL bits. + * then this function calls... + * The function callback, which can use the FTRACE bits to + * check for recursion. + * + * Now if the arch does not suppport a feature, and it calls + * the global list function which calls the ftrace callback + * all three of these steps will do a recursion protection. + * There's no reason to do one if the previous caller already + * did. The recursion that we are protecting against will + * go through the same steps again. + * + * To prevent the multiple recursion checks, if a recursion + * bit is set that is higher than the MAX bit of the current + * check, then we know that the check was made by the previous + * caller, and we can skip the current check. 
+ */ enum { - TRACE_INTERNAL_BIT = 11, - TRACE_INTERNAL_NMI_BIT, - TRACE_INTERNAL_IRQ_BIT, - TRACE_INTERNAL_SIRQ_BIT, + TRACE_FTRACE_BIT = 11, + TRACE_FTRACE_NMI_BIT, + TRACE_FTRACE_IRQ_BIT, + TRACE_FTRACE_SIRQ_BIT, + /* GLOBAL_BITs must be greater than FTRACE_BITs */ TRACE_GLOBAL_BIT, TRACE_GLOBAL_NMI_BIT, TRACE_GLOBAL_IRQ_BIT, TRACE_GLOBAL_SIRQ_BIT, + /* INTERNAL_BITs must be greater than GLOBAL_BITs */ + TRACE_INTERNAL_BIT, + TRACE_INTERNAL_NMI_BIT, + TRACE_INTERNAL_IRQ_BIT, + TRACE_INTERNAL_SIRQ_BIT, + TRACE_CONTROL_BIT, /* @@ -325,6 +356,71 @@ enum { #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) +#define TRACE_CONTEXT_BITS 4 + +#define TRACE_FTRACE_START TRACE_FTRACE_BIT +#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) + +#define TRACE_GLOBAL_START TRACE_GLOBAL_BIT +#define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1) + +#define TRACE_LIST_START TRACE_INTERNAL_BIT +#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) + +#define TRACE_CONTEXT_MASK TRACE_LIST_MAX + +static __always_inline int trace_get_context_bit(void) +{ + int bit; + + if (in_interrupt()) { + if (in_nmi()) + bit = 0; + + else if (in_irq()) + bit = 1; + else + bit = 2; + } else + bit = 3; + + return bit; +} + +static __always_inline int trace_test_and_set_recursion(int start, int max) +{ + unsigned int val = current->trace_recursion; + int bit; + + /* A previous recursion check was made */ + if ((val & TRACE_CONTEXT_MASK) > max) + return 0; + + bit = trace_get_context_bit() + start; + if (unlikely(val & (1 << bit))) + return -1; + + val |= 1 << bit; + current->trace_recursion = val; + barrier(); + + return bit; +} + +static __always_inline void trace_clear_recursion(int bit) +{ + unsigned int val = current->trace_recursion; + + if (!bit) + return; + + bit = 1 << bit; + val &= ~bit; + + barrier(); + current->trace_recursion = val; +} + #define TRACE_PIPE_ALL_CPU -1 static inline struct ring_buffer_iter * -- cgit v1.2.3-59-g8ed1b From 897f68a48b1f8fb6cb7493e1ee37e3ed7f879937 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 17:52:35 -0400 Subject: ftrace: Use only the preempt version of function tracing The function tracer had two different versions of function tracing: the irq disabling version and the preempt disable version. As function tracing is very intrusive and can cause nasty recursion issues, it has its own recursion protection. But the old method to do this was a flat layer. If it detected that a recursion was happening then it would just return without recording. This made the preempt version (much faster than the irq disabling one) not very useful, because if an interrupt were to occur after the recursion flag was set, the interrupt would not be traced at all, because every function that was traced would think it recursed on itself (due to the context it preempted setting the recursive flag). Now that we have a recursion flag for every context level, we no longer need to worry about that. We can disable preemption, set the current context recursion check bit, and go on. If an interrupt were to come along, it would check its own context bit and happily continue to trace. As the preempt version is faster than the irq disable version, there's no more reason to keep the irq disable version around. And the irq disable version still had an issue with missing out on tracing NMI code.
Remove the irq disable function tracer version and have the preempt disable version be the default (and only version). Before this patch we had the following results from running: # echo function > /debug/tracing/current_tracer # for i in `seq 10`; do ./hackbench 50; done Time: 12.028 Time: 11.945 Time: 11.925 Time: 11.964 Time: 12.002 Time: 11.910 Time: 11.944 Time: 11.929 Time: 11.941 Time: 11.924 (average: 11.9512) Now we have: # echo function > /debug/tracing/current_tracer # for i in `seq 10`; do ./hackbench 50; done Time: 10.285 Time: 10.407 Time: 10.243 Time: 10.372 Time: 10.380 Time: 10.198 Time: 10.272 Time: 10.354 Time: 10.248 Time: 10.253 (average: 10.3012) a 13.8% savings! Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions.c | 61 ++++++++++-------------------------------- 1 file changed, 14 insertions(+), 47 deletions(-) diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 8e3ad8082ab7..1c327ef13a9a 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -47,34 +47,6 @@ static void function_trace_start(struct trace_array *tr) tracing_reset_online_cpus(tr); } -static void -function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *op, struct pt_regs *pt_regs) -{ - struct trace_array *tr = func_trace; - struct trace_array_cpu *data; - unsigned long flags; - long disabled; - int cpu; - int pc; - - if (unlikely(!ftrace_function_enabled)) - return; - - pc = preempt_count(); - preempt_disable_notrace(); - local_save_flags(flags); - cpu = raw_smp_processor_id(); - data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - - if (likely(disabled == 1)) - trace_function(tr, ip, parent_ip, flags, pc); - - atomic_dec(&data->disabled); - preempt_enable_notrace(); -} - /* Our option */ enum { TRACE_FUNC_OPT_STACK = 0x1, @@ -85,34 +57,34 @@ static struct tracer_flags func_flags; static void function_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) - { struct trace_array *tr = func_trace; struct trace_array_cpu *data; unsigned long flags; - long disabled; + unsigned int bit; int cpu; int pc; if (unlikely(!ftrace_function_enabled)) return; - /* - * Need to use raw, since this must be called before the - * recursive protection is performed.
- */ - local_irq_save(flags); - cpu = raw_smp_processor_id(); - data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); + pc = preempt_count(); + preempt_disable_notrace(); - if (likely(disabled == 1)) { - pc = preempt_count(); + bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX); + if (bit < 0) + goto out; + + cpu = smp_processor_id(); + data = tr->data[cpu]; + if (!atomic_read(&data->disabled)) { + local_save_flags(flags); trace_function(tr, ip, parent_ip, flags, pc); } + trace_clear_recursion(bit); - atomic_dec(&data->disabled); - local_irq_restore(flags); + out: + preempt_enable_notrace(); } static void @@ -185,11 +157,6 @@ static void tracing_start_function_trace(void) { ftrace_function_enabled = 0; - if (trace_flags & TRACE_ITER_PREEMPTONLY) - trace_ops.func = function_trace_call_preempt_only; - else - trace_ops.func = function_trace_call; - if (func_flags.val & TRACE_FUNC_OPT_STACK) register_ftrace_function(&trace_stack_ops); else -- cgit v1.2.3-59-g8ed1b From 567cd4da54ff45513d2ca1f0e3cb9ba45b66d6cf Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Nov 2012 18:33:05 -0400 Subject: ring-buffer: Use context bit recursion checking Using context bit recursion checking, we can help increase the performance of the ring buffer. Before this patch: # echo function > /debug/tracing/current_tracer # for i in `seq 10`; do ./hackbench 50; done Time: 10.285 Time: 10.407 Time: 10.243 Time: 10.372 Time: 10.380 Time: 10.198 Time: 10.272 Time: 10.354 Time: 10.248 Time: 10.253 (average: 10.3012) Now we have: # echo function > /debug/tracing/current_tracer # for i in `seq 10`; do ./hackbench 50; done Time: 9.712 Time: 9.824 Time: 9.861 Time: 9.827 Time: 9.962 Time: 9.905 Time: 9.886 Time: 10.088 Time: 9.861 Time: 9.834 (average: 9.876) a 4% savings! Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 85 ++++++++++++++++++++++++++++++++-------------- kernel/trace/trace.h | 13 +++---- 2 files changed, 67 insertions(+), 31 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 6ff9cc4658ed..481e26269281 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2432,41 +2432,76 @@ rb_reserve_next_event(struct ring_buffer *buffer, #ifdef CONFIG_TRACING -#define TRACE_RECURSIVE_DEPTH 16 +/* + * The lock and unlock are done within a preempt disable section. + * The current_context per_cpu variable can only be modified + * by the current task between lock and unlock. But it can + * be modified more than once via an interrupt. To pass this + * information from the lock to the unlock without having to + * access the 'in_interrupt()' functions again (which do show + * a bit of overhead in something as critical as function tracing, + * we use a bitmask trick. + * + * bit 0 = NMI context + * bit 1 = IRQ context + * bit 2 = SoftIRQ context + * bit 3 = normal context. + * + * This works because this is the order of contexts that can + * preempt other contexts. A SoftIRQ never preempts an IRQ + * context. + * + * When the context is determined, the corresponding bit is + * checked and set (if it was set, then a recursion of that context + * happened). + * + * On unlock, we need to clear this bit. To do so, just subtract + * 1 from the current_context and AND it to itself.
+ * + * (binary) + * 101 - 1 = 100 + * 101 & 100 = 100 (clearing bit zero) + * + * 1010 - 1 = 1001 + * 1010 & 1001 = 1000 (clearing bit 1) + * + * The least significant bit can be cleared this way, and it + * just so happens that it is the same bit corresponding to + * the current context. + */ +static DEFINE_PER_CPU(unsigned int, current_context); -/* Keep this code out of the fast path cache */ -static noinline void trace_recursive_fail(void) +static __always_inline int trace_recursive_lock(void) { - /* Disable all tracing before we do anything else */ - tracing_off_permanent(); - - printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:" - "HC[%lu]:SC[%lu]:NMI[%lu]\n", - trace_recursion_buffer(), - hardirq_count() >> HARDIRQ_SHIFT, - softirq_count() >> SOFTIRQ_SHIFT, - in_nmi()); + unsigned int val = this_cpu_read(current_context); + int bit; - WARN_ON_ONCE(1); -} - -static inline int trace_recursive_lock(void) -{ - trace_recursion_inc(); + if (in_interrupt()) { + if (in_nmi()) + bit = 0; + else if (in_irq()) + bit = 1; + else + bit = 2; + } else + bit = 3; - if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH)) - return 0; + if (unlikely(val & (1 << bit))) + return 1; - trace_recursive_fail(); + val |= (1 << bit); + this_cpu_write(current_context, val); - return -1; + return 0; } -static inline void trace_recursive_unlock(void) +static __always_inline void trace_recursive_unlock(void) { - WARN_ON_ONCE(!trace_recursion_buffer()); + unsigned int val = this_cpu_read(current_context); - trace_recursion_dec(); + val--; + val &= this_cpu_read(current_context); + this_cpu_write(current_context, val); } #else diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index c203a51dd412..04a2c7ab1735 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -291,11 +291,6 @@ struct tracer { /* Only current can touch trace_recursion */ -#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0) -#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0) - -/* Ring buffer has the 10 LSB bits to count */ -#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff) /* * For function tracing recursion: @@ -323,7 +318,13 @@ struct tracer { * caller, and we can skip the current check. */ enum { - TRACE_FTRACE_BIT = 11, + TRACE_BUFFER_BIT, + TRACE_BUFFER_NMI_BIT, + TRACE_BUFFER_IRQ_BIT, + TRACE_BUFFER_SIRQ_BIT, + + /* Start of function recursion bits */ + TRACE_FTRACE_BIT, TRACE_FTRACE_NMI_BIT, TRACE_FTRACE_IRQ_BIT, TRACE_FTRACE_SIRQ_BIT, -- cgit v1.2.3-59-g8ed1b From 0b07436d95b5404134da4d661fd183eac863513e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 22 Jan 2013 16:58:30 -0500 Subject: ring-buffer: Remove trace.h from ring_buffer.c ring_buffer.c used to require declarations from trace.h, but these have moved to the generic header files. There's nothing in trace.h that ring_buffer.c requires. There are some headers that trace.h included that ring_buffer.c needs, but it's best that it includes them directly, and not include trace.h. Also, some things may use ring_buffer.c without having tracing configured. This removes the dependency that may come in the future.
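As an aside on the ring-buffer recursion locking above: the unlock's "subtract 1 and AND" step is simply "clear the lowest set bit", and because contexts nest in the order NMI < IRQ < SoftIRQ < normal, the lowest set bit is always the one the currently-exiting context set. A short worked snippet (illustration only):

unsigned int val;

val = 0x5;		/* binary 101: softirq was tracing, an NMI nested  */
val = (val - 1) & val;	/* 101 & 100 = 100: bit 0, the NMI's, is cleared   */

val = 0xa;		/* binary 1010: normal ctx tracing, an IRQ nested  */
val = (val - 1) & val;	/* 1010 & 1001 = 1000: bit 1, the IRQ's, is cleared */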
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 481e26269281..13950d9027cb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3,8 +3,10 @@ * * Copyright (C) 2008 Steven Rostedt */ +#include #include #include +#include #include #include #include @@ -21,7 +23,6 @@ #include #include -#include "trace.h" static void update_pages_handler(struct work_struct *work); -- cgit v1.2.3-59-g8ed1b From d41032a83b4683481cadff84bbf8e0eafeaba830 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 24 Jan 2013 07:52:34 -0500 Subject: tracing: Fix unsigned int compare of zero in recursion check Dan's smatch found a compare bug with the result of trace_test_and_set_recursion() being compared to less than zero. If the function fails, it returns -1, but was saved in an unsigned int, which will never be less than zero and will ignore the result of the test if a recursion did happen. Luckily this is the last of the recursion tests, as the infrastructure of ftrace would catch recursions before it got here, except for a few exceptions. Reported-by: Dan Carpenter Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 1c327ef13a9a..601152523326 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -61,7 +61,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, struct trace_array *tr = func_trace; struct trace_array_cpu *data; unsigned long flags; - unsigned int bit; + int bit; int cpu; int pc; -- cgit v1.2.3-59-g8ed1b From ba6fdda46b377034c782c0b89c8f1090b31eabd8 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 22 Dec 2012 16:59:51 +0100 Subject: profiling: Remove unused timer hook The last remaining user was oprofile and its use was removed a while ago in commit bc078e4eab65f11bba ("oprofile: convert oprofile from timer_hook to hrtimer"). There doesn't seem to have been any upstream user of this hook for about two years now. And I'm not even aware of any out of tree user. Let's remove it. Signed-off-by: Frederic Weisbecker Cc: Alessio Igor Bogani Cc: Avi Kivity Cc: Chris Metcalf Cc: Christoph Lameter Cc: Geoff Levand Cc: Gilad Ben Yossef Cc: Hakan Akkan Cc: Paul E. McKenney
Cc: Paul Gortmaker Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1356191991-2251-1-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/profile.h | 13 ------------- kernel/profile.c | 24 ------------------------ 2 files changed, 37 deletions(-) diff --git a/include/linux/profile.h b/include/linux/profile.h index a0fc32279fc0..21123902366d 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -82,9 +82,6 @@ int task_handoff_unregister(struct notifier_block * n); int profile_event_register(enum profile_type, struct notifier_block * n); int profile_event_unregister(enum profile_type, struct notifier_block * n); -int register_timer_hook(int (*hook)(struct pt_regs *)); -void unregister_timer_hook(int (*hook)(struct pt_regs *)); - struct pt_regs; #else @@ -135,16 +132,6 @@ static inline int profile_event_unregister(enum profile_type t, struct notifier_ #define profile_handoff_task(a) (0) #define profile_munmap(a) do { } while (0) -static inline int register_timer_hook(int (*hook)(struct pt_regs *)) -{ - return -ENOSYS; -} - -static inline void unregister_timer_hook(int (*hook)(struct pt_regs *)) -{ - return; -} - #endif /* CONFIG_PROFILING */ #endif /* _LINUX_PROFILE_H */ diff --git a/kernel/profile.c b/kernel/profile.c index 1f391819c42f..dc3384ee874e 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -37,9 +37,6 @@ struct profile_hit { #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit)) #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ) -/* Oprofile timer tick hook */ -static int (*timer_hook)(struct pt_regs *) __read_mostly; - static atomic_t *prof_buffer; static unsigned long prof_len, prof_shift; @@ -208,25 +205,6 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n) } EXPORT_SYMBOL_GPL(profile_event_unregister); -int register_timer_hook(int (*hook)(struct pt_regs *)) -{ - if (timer_hook) - return -EBUSY; - timer_hook = hook; - return 0; -} -EXPORT_SYMBOL_GPL(register_timer_hook); - -void unregister_timer_hook(int (*hook)(struct pt_regs *)) -{ - WARN_ON(hook != timer_hook); - timer_hook = NULL; - /* make sure all CPUs see the NULL hook */ - synchronize_sched(); /* Allow ongoing interrupts to complete. */ -} -EXPORT_SYMBOL_GPL(unregister_timer_hook); - - #ifdef CONFIG_SMP /* * Each cpu has a pair of open-addressed hashtables for pending @@ -436,8 +414,6 @@ void profile_tick(int type) { struct pt_regs *regs = get_irq_regs(); - if (type == CPU_PROFILING && timer_hook) - timer_hook(regs); if (!user_mode(regs) && prof_cpu_mask != NULL && cpumask_test_cpu(smp_processor_id(), prof_cpu_mask)) profile_hit(type, (void *)profile_pc(regs)); -- cgit v1.2.3-59-g8ed1b From 9afcf930b1fa1158b0878afeba3eff299300dc65 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 10 Dec 2012 17:29:54 +0900 Subject: perf hists: Exchange order of comparing items when collapsing hists When comparing entries for collapsing, put the given entry first, and then the iterated entry. This is not the case for hist_entry__cmp() when it is called because the given sort keys don't require collapsing. So change the order for the sake of consistency. It will be required for matching and/or linking multiple hist entries.
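The consistency point can be made concrete: every rb-tree walk must feed the comparator its arguments in the same order as the insertion path did, or a lookup can descend the wrong way. A hedged sketch with simplified types (struct entry and find_entry() are invented for illustration; the real hunks follow below):

struct entry { struct rb_node rb_node; long key; };

static struct entry *find_entry(struct rb_root *root, long key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct entry *iter = rb_entry(n, struct entry, rb_node);
		/* iterated entry first, probe second -- this must mirror
		 * the argument order used when the tree was built */
		long cmp = iter->key - key;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}
	return NULL;
}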
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1355128197-18193-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-diff.c | 2 +- tools/perf/util/hist.c | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index b2e7d39f099b..4dda6f4dc618 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -285,7 +285,7 @@ static void insert_hist_entry_by_name(struct rb_root *root, while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); - if (hist_entry__cmp(he, iter) < 0) + if (hist_entry__cmp(iter, he) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 82df1b26f0d4..3a9ccd09835d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -285,7 +285,13 @@ static struct hist_entry *add_hist_entry(struct hists *hists, parent = *p; he = rb_entry(parent, struct hist_entry, rb_node_in); - cmp = hist_entry__cmp(entry, he); + /* + * Make sure that it receives arguments in a same order as + * hist_entry__collapse() so that we can use an appropriate + * function when searching an entry regardless which sort + * keys were used. + */ + cmp = hist_entry__cmp(he, entry); if (!cmp) { he_stat__add_period(&he->stat, period); @@ -729,7 +735,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists, parent = *p; he = rb_entry(parent, struct hist_entry, rb_node); - cmp = hist_entry__cmp(pair, he); + cmp = hist_entry__cmp(he, pair); if (!cmp) goto out; @@ -759,7 +765,7 @@ static struct hist_entry *hists__find_entry(struct hists *hists, while (n) { struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); - int64_t cmp = hist_entry__cmp(he, iter); + int64_t cmp = hist_entry__cmp(iter, he); if (cmp < 0) n = n->rb_left; -- cgit v1.2.3-59-g8ed1b From ce74f60eab3cc8b7a3b0cb9c29ec9b1e1abac7d2 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 10 Dec 2012 17:29:55 +0900 Subject: perf hists: Link hist entries before inserting to an output tree For matching and/or linking hist entries, they need to be sorted by given sort keys. However current hists__match/link did this on the output trees, so that the entries in the output tree need to be resorted before doing it. This does not look so good since we have trees for collecting or collapsing entries before passing them to an output tree and they're already sorted by the given sort keys. Since we don't need to print anything at the time of matching/linking, we can use these internal trees directly instead of bothering with double resort on the output tree. Its only user - at the time of this writing - perf diff can be easily converted to use the internal tree and can save some lines too by getting rid of unnecessary resorting code.
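In outline, a diff-style consumer now does something like the following; this is a hedged sketch using the functions this patch touches (evsel_old/evsel_new are placeholder names, and error handling is omitted):

/* Collapse into the internal trees; they are already sorted by the
 * configured sort keys, so no output resort is needed first. */
hists__collapse_resort(&evsel_old->hists);
hists__collapse_resort(&evsel_new->hists);

/* Pair entries present in both hists ... */
hists__match(&evsel_new->hists, &evsel_old->hists);

/* ... and add dummy entries for the unmatched ones. */
hists__link(&evsel_new->hists, &evsel_old->hists);

/* Only now build the output tree for printing. */
hists__output_resort(&evsel_new->hists);
hists__fprintf(&evsel_new->hists, true, 0, 0, stdout);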
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1355128197-18193-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-diff.c | 65 +++++++++++++---------------------------------- tools/perf/util/hist.c | 49 ++++++++++++++++++++++++++--------- 2 files changed, 54 insertions(+), 60 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 4dda6f4dc618..8b896f5eded0 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -275,43 +275,6 @@ static struct perf_tool tool = { .ordering_requires_timestamps = true, }; -static void insert_hist_entry_by_name(struct rb_root *root, - struct hist_entry *he) -{ - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - if (hist_entry__cmp(iter, he) < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, root); -} - -static void hists__name_resort(struct hists *self) -{ - struct rb_root tmp = RB_ROOT; - struct rb_node *next = rb_first(&self->entries); - - while (next != NULL) { - struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); - - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &self->entries); - insert_hist_entry_by_name(&tmp, n); - } - - self->entries = tmp; -} - static struct perf_evsel *evsel_match(struct perf_evsel *evsel, struct perf_evlist *evlist) { @@ -324,30 +287,34 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel, return NULL; } -static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name) +static void perf_evlist__collapse_resort(struct perf_evlist *evlist) { struct perf_evsel *evsel; list_for_each_entry(evsel, &evlist->entries, node) { struct hists *hists = &evsel->hists; - hists__output_resort(hists); - - if (name) - hists__name_resort(hists); + hists__collapse_resort(hists); } } static void hists__baseline_only(struct hists *hists) { - struct rb_node *next = rb_first(&hists->entries); + struct rb_root *root; + struct rb_node *next; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + next = rb_first(root); while (next != NULL) { - struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); + struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); - next = rb_next(&he->rb_node); + next = rb_next(&he->rb_node_in); if (!hist_entry__next_pair(he)) { - rb_erase(&he->rb_node, &hists->entries); + rb_erase(&he->rb_node_in, root); hist_entry__free(he); } } @@ -471,6 +438,8 @@ static void hists__process(struct hists *old, struct hists *new) else hists__link(new, old); + hists__output_resort(new); + if (sort_compute) { hists__precompute(new); hists__compute_resort(new); @@ -505,8 +474,8 @@ static int __cmd_diff(void) evlist_old = older->evlist; evlist_new = newer->evlist; - perf_evlist__resort_hists(evlist_old, true); - perf_evlist__resort_hists(evlist_new, false); + perf_evlist__collapse_resort(evlist_old); + perf_evlist__collapse_resort(evlist_new); list_for_each_entry(evsel, &evlist_new->entries, node) { struct perf_evsel *evsel_old; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 3a9ccd09835d..8ff3c2f8a5dd 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -726,16 
+726,24 @@ void hists__inc_nr_events(struct hists *hists, u32 type) static struct hist_entry *hists__add_dummy_entry(struct hists *hists, struct hist_entry *pair) { - struct rb_node **p = &hists->entries.rb_node; + struct rb_root *root; + struct rb_node **p; struct rb_node *parent = NULL; struct hist_entry *he; int cmp; + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + p = &root->rb_node; + while (*p != NULL) { parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); + he = rb_entry(parent, struct hist_entry, rb_node_in); - cmp = hist_entry__cmp(he, pair); + cmp = hist_entry__collapse(he, pair); if (!cmp) goto out; @@ -750,8 +758,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists, if (he) { memset(&he->stat, 0, sizeof(he->stat)); he->hists = hists; - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hists->entries); + rb_link_node(&he->rb_node_in, parent, p); + rb_insert_color(&he->rb_node_in, root); hists__inc_nr_entries(hists, he); } out: @@ -761,11 +769,16 @@ out: static struct hist_entry *hists__find_entry(struct hists *hists, struct hist_entry *he) { - struct rb_node *n = hists->entries.rb_node; + struct rb_node *n; + + if (sort__need_collapse) + n = hists->entries_collapsed.rb_node; + else + n = hists->entries_in->rb_node; while (n) { - struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); - int64_t cmp = hist_entry__cmp(iter, he); + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); + int64_t cmp = hist_entry__collapse(iter, he); if (cmp < 0) n = n->rb_left; @@ -783,11 +796,17 @@ static struct hist_entry *hists__find_entry(struct hists *hists, */ void hists__match(struct hists *leader, struct hists *other) { + struct rb_root *root; struct rb_node *nd; struct hist_entry *pos, *pair; - for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) { - pos = rb_entry(nd, struct hist_entry, rb_node); + if (sort__need_collapse) + root = &leader->entries_collapsed; + else + root = leader->entries_in; + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node_in); pair = hists__find_entry(other, pos); if (pair) @@ -802,11 +821,17 @@ void hists__match(struct hists *leader, struct hists *other) */ int hists__link(struct hists *leader, struct hists *other) { + struct rb_root *root; struct rb_node *nd; struct hist_entry *pos, *pair; - for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) { - pos = rb_entry(nd, struct hist_entry, rb_node); + if (sort__need_collapse) + root = &other->entries_collapsed; + else + root = other->entries_in; + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node_in); if (!hist_entry__has_pairs(pos)) { pair = hists__add_dummy_entry(leader, pos); -- cgit v1.2.3-59-g8ed1b From 66f97ed3ac44c24958171bbc5cc04896147752b7 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 10 Dec 2012 17:29:56 +0900 Subject: perf diff: Use internal rb tree for compute resort There's no reason to run hists_compute_resort() using output tree. Convert it to use internal tree so that it can remove unnecessary _output_resort. 
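Stripped to its core, the pattern is: walk the internal tree via rb_node_in and re-insert each entry into a fresh output tree via the separate rb_node. A hedged sketch (insert_by_key() stands in for insert_hist_entry_by_compute()):

static void resort_from_internal(struct hists *hists)
{
	struct rb_root *root = sort__need_collapse ?
			&hists->entries_collapsed : hists->entries_in;
	struct rb_node *next = rb_first(root);

	hists->entries = RB_ROOT;	/* start a fresh output tree */

	while (next) {
		struct hist_entry *he;

		he = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&he->rb_node_in);

		/* rb_node_in keeps the entry linked in the internal tree,
		 * so no rb_erase() is needed before re-inserting here. */
		insert_by_key(&hists->entries, he);
		hists__inc_nr_entries(hists, he);
	}
}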
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1355128197-18193-4-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-diff.c | 31 +++++++++++++++++++++---------- tools/perf/util/hist.c | 2 +- tools/perf/util/hist.h | 1 + 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 8b896f5eded0..4af0b580b046 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -414,19 +414,30 @@ static void insert_hist_entry_by_compute(struct rb_root *root, static void hists__compute_resort(struct hists *hists) { - struct rb_root tmp = RB_ROOT; - struct rb_node *next = rb_first(&hists->entries); + struct rb_root *root; + struct rb_node *next; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + hists->entries = RB_ROOT; + next = rb_first(root); + + hists->nr_entries = 0; + hists->stats.total_period = 0; + hists__reset_col_len(hists); while (next != NULL) { - struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); + struct hist_entry *he; - next = rb_next(&he->rb_node); + he = rb_entry(next, struct hist_entry, rb_node_in); + next = rb_next(&he->rb_node_in); - rb_erase(&he->rb_node, &hists->entries); - insert_hist_entry_by_compute(&tmp, he, compute); + insert_hist_entry_by_compute(&hists->entries, he, compute); + hists__inc_nr_entries(hists, he); } - - hists->entries = tmp; } static void hists__process(struct hists *old, struct hists *new) @@ -438,11 +449,11 @@ static void hists__process(struct hists *old, struct hists *new) else hists__link(new, old); - hists__output_resort(new); - if (sort_compute) { hists__precompute(new); hists__compute_resort(new); + } else { + hists__output_resort(new); } hists__fprintf(new, true, 0, 0, stdout); diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 8ff3c2f8a5dd..37179af74409 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -251,7 +251,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template) return he; } -static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) +void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) { if (!h->filtered) { hists__calc_col_len(hists, h); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 5b3b0075be64..d3664abea6d6 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -96,6 +96,7 @@ void hists__decay_entries_threaded(struct hists *hists, bool zap_user, bool zap_kernel); void hists__output_recalc_col_len(struct hists *hists, int max_rows); +void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h); void hists__inc_nr_events(struct hists *self, u32 type); size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); -- cgit v1.2.3-59-g8ed1b From f8ebb0cdf30fec2c2a5a364cdda8e6758a44026c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 10 Dec 2012 17:29:57 +0900 Subject: perf test: Add a test case for hists__{match,link} As they are used from diff and event group report, add a test case to verify their behaviors. In this test I made a fake machine and two evsels. Each evsel got 10 samples (so hist entries) - 5 are common and the rest are not. So after hists__match() both of them will have 5 entries with pair set.
And the second evsel has a collapsed entry so that the total number is 9 - I made it in order to simulate a more realistic case. Thus after hists__link the first evsel will have 14 entries - 5 are common (w/ pair), 5 are unmatched (w/o pair) and 4 are dummy (w/ pair). And the second evsel will have 9 entries, all of which have their pair. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1355128197-18193-5-git-send-email-namhyung@kernel.org [ committer note: fixed up clashes with cset that moved methods to machine.h ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 1 + tools/perf/tests/builtin-test.c | 4 + tools/perf/tests/hists_link.c | 502 ++++++++++++++++++++++++++++++++++++++++ tools/perf/tests/tests.h | 1 + 4 files changed, 508 insertions(+) create mode 100644 tools/perf/tests/hists_link.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2cbaad83a6e2..c9f72b18e787 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -487,6 +487,7 @@ LIB_OBJS += $(OUTPUT)tests/rdpmc.o LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o LIB_OBJS += $(OUTPUT)tests/pmu.o +LIB_OBJS += $(OUTPUT)tests/hists_link.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 186f67535494..479d10484a74 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -68,6 +68,10 @@ static struct test { .desc = "struct perf_event_attr setup", .func = test__attr, }, + { + .desc = "Test matching and linking mutliple hists", + .func = test__hists_link, + }, { .func = NULL, }, diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c new file mode 100644 index 000000000000..0f1aae3b8a99 --- /dev/null +++ b/tools/perf/tests/hists_link.c @@ -0,0 +1,502 @@ +#include "perf.h" +#include "tests.h" +#include "debug.h" +#include "symbol.h" +#include "sort.h" +#include "evsel.h" +#include "evlist.h" +#include "machine.h" +#include "thread.h" +#include "parse-events.h" + +static struct { + u32 pid; + const char *comm; +} fake_threads[] = { + { 100, "perf" }, + { 200, "perf" }, + { 300, "bash" }, +}; + +static struct { + u32 pid; + u64 start; + const char *filename; +} fake_mmap_info[] = { + { 100, 0x40000, "perf" }, + { 100, 0x50000, "libc" }, + { 100, 0xf0000, "[kernel]" }, + { 200, 0x40000, "perf" }, + { 200, 0x50000, "libc" }, + { 200, 0xf0000, "[kernel]" }, + { 300, 0x40000, "bash" }, + { 300, 0x50000, "libc" }, + { 300, 0xf0000, "[kernel]" }, +}; + +struct fake_sym { + u64 start; + u64 length; + const char *name; +}; + +static struct fake_sym perf_syms[] = { + { 700, 100, "main" }, + { 800, 100, "run_command" }, + { 900, 100, "cmd_record" }, +}; + +static struct fake_sym bash_syms[] = { + { 700, 100, "main" }, + { 800, 100, "xmalloc" }, + { 900, 100, "xfree" }, +}; + +static struct fake_sym libc_syms[] = { + { 700, 100, "malloc" }, + { 800, 100, "free" }, + { 900, 100, "realloc" }, +}; + +static struct fake_sym kernel_syms[] = { + { 700, 100, "schedule" }, + { 800, 100, "page_fault" }, + { 900, 100, "sys_perf_event_open" }, +}; + +static struct { + const char *dso_name; + struct fake_sym *syms; + size_t nr_syms; +} fake_symbols[] = { + { "perf", perf_syms, ARRAY_SIZE(perf_syms) }, + { "bash", bash_syms, ARRAY_SIZE(bash_syms) }, + { "libc", libc_syms, ARRAY_SIZE(libc_syms) }, + { "[kernel]", kernel_syms,
ARRAY_SIZE(kernel_syms) }, +}; + +static struct machine *setup_fake_machine(void) +{ + struct rb_root machine_root = RB_ROOT; + struct machine *machine; + size_t i; + + machine = machines__findnew(&machine_root, HOST_KERNEL_ID); + if (machine == NULL) { + pr_debug("Not enough memory for machine setup\n"); + return NULL; + } + + for (i = 0; i < ARRAY_SIZE(fake_threads); i++) { + struct thread *thread; + + thread = machine__findnew_thread(machine, fake_threads[i].pid); + if (thread == NULL) + goto out; + + thread__set_comm(thread, fake_threads[i].comm); + } + + for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) { + union perf_event fake_mmap_event = { + .mmap = { + .header = { .misc = PERF_RECORD_MISC_USER, }, + .pid = fake_mmap_info[i].pid, + .start = fake_mmap_info[i].start, + .len = 0x1000ULL, + .pgoff = 0ULL, + }, + }; + + strcpy(fake_mmap_event.mmap.filename, + fake_mmap_info[i].filename); + + machine__process_mmap_event(machine, &fake_mmap_event); + } + + for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) { + size_t k; + struct dso *dso; + + dso = __dsos__findnew(&machine->user_dsos, + fake_symbols[i].dso_name); + if (dso == NULL) + goto out; + + /* emulate dso__load() */ + dso__set_loaded(dso, MAP__FUNCTION); + + for (k = 0; k < fake_symbols[i].nr_syms; k++) { + struct symbol *sym; + struct fake_sym *fsym = &fake_symbols[i].syms[k]; + + sym = symbol__new(fsym->start, fsym->length, + STB_GLOBAL, fsym->name); + if (sym == NULL) + goto out; + + symbols__insert(&dso->symbols[MAP__FUNCTION], sym); + } + } + + return machine; + +out: + pr_debug("Not enough memory for machine setup\n"); + machine__delete_threads(machine); + machine__delete(machine); + return NULL; +} + +struct sample { + u32 pid; + u64 ip; + struct thread *thread; + struct map *map; + struct symbol *sym; +}; + +static struct sample fake_common_samples[] = { + /* perf [kernel] schedule() */ + { .pid = 100, .ip = 0xf0000 + 700, }, + /* perf [perf] main() */ + { .pid = 200, .ip = 0x40000 + 700, }, + /* perf [perf] cmd_record() */ + { .pid = 200, .ip = 0x40000 + 900, }, + /* bash [bash] xmalloc() */ + { .pid = 300, .ip = 0x40000 + 800, }, + /* bash [libc] malloc() */ + { .pid = 300, .ip = 0x50000 + 700, }, +}; + +static struct sample fake_samples[][5] = { + { + /* perf [perf] run_command() */ + { .pid = 100, .ip = 0x40000 + 800, }, + /* perf [libc] malloc() */ + { .pid = 100, .ip = 0x50000 + 700, }, + /* perf [kernel] page_fault() */ + { .pid = 100, .ip = 0xf0000 + 800, }, + /* perf [kernel] sys_perf_event_open() */ + { .pid = 200, .ip = 0xf0000 + 900, }, + /* bash [libc] free() */ + { .pid = 300, .ip = 0x50000 + 800, }, + }, + { + /* perf [libc] free() */ + { .pid = 200, .ip = 0x50000 + 800, }, + /* bash [libc] malloc() */ + { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */ + /* bash [bash] xfee() */ + { .pid = 300, .ip = 0x40000 + 900, }, + /* bash [libc] realloc() */ + { .pid = 300, .ip = 0x50000 + 900, }, + /* bash [kernel] page_fault() */ + { .pid = 300, .ip = 0xf0000 + 800, }, + }, +}; + +static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) +{ + struct perf_evsel *evsel; + struct addr_location al; + struct hist_entry *he; + struct perf_sample sample = { .cpu = 0, }; + size_t i = 0, k; + + /* + * each evsel will have 10 samples - 5 common and 5 distinct. + * However the second evsel also has a collapsed entry for + * "bash [libc] malloc" so total 9 entries will be in the tree. 
+ */ + list_for_each_entry(evsel, &evlist->entries, node) { + for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) { + const union perf_event event = { + .ip = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + .pid = fake_common_samples[k].pid, + .ip = fake_common_samples[k].ip, + }, + }; + + if (perf_event__preprocess_sample(&event, machine, &al, + &sample, 0) < 0) + goto out; + + he = __hists__add_entry(&evsel->hists, &al, NULL, 1); + if (he == NULL) + goto out; + + fake_common_samples[k].thread = al.thread; + fake_common_samples[k].map = al.map; + fake_common_samples[k].sym = al.sym; + } + + for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) { + const union perf_event event = { + .ip = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + .pid = fake_samples[i][k].pid, + .ip = fake_samples[i][k].ip, + }, + }; + + if (perf_event__preprocess_sample(&event, machine, &al, + &sample, 0) < 0) + goto out; + + he = __hists__add_entry(&evsel->hists, &al, NULL, 1); + if (he == NULL) + goto out; + + fake_samples[i][k].thread = al.thread; + fake_samples[i][k].map = al.map; + fake_samples[i][k].sym = al.sym; + } + i++; + } + + return 0; + +out: + pr_debug("Not enough memory for adding a hist entry\n"); + return -1; +} + +static int find_sample(struct sample *samples, size_t nr_samples, + struct thread *t, struct map *m, struct symbol *s) +{ + while (nr_samples--) { + if (samples->thread == t && samples->map == m && + samples->sym == s) + return 1; + samples++; + } + return 0; +} + +static int __validate_match(struct hists *hists) +{ + size_t count = 0; + struct rb_root *root; + struct rb_node *node; + + /* + * Only entries from fake_common_samples should have a pair. + */ + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node_in); + + if (hist_entry__has_pairs(he)) { + if (find_sample(fake_common_samples, + ARRAY_SIZE(fake_common_samples), + he->thread, he->ms.map, he->ms.sym)) { + count++; + } else { + pr_debug("Can't find the matched entry\n"); + return -1; + } + } + + node = rb_next(node); + } + + if (count != ARRAY_SIZE(fake_common_samples)) { + pr_debug("Invalid count for matched entries: %zd of %zd\n", + count, ARRAY_SIZE(fake_common_samples)); + return -1; + } + + return 0; +} + +static int validate_match(struct hists *leader, struct hists *other) +{ + return __validate_match(leader) || __validate_match(other); +} + +static int __validate_link(struct hists *hists, int idx) +{ + size_t count = 0; + size_t count_pair = 0; + size_t count_dummy = 0; + struct rb_root *root; + struct rb_node *node; + + /* + * Leader hists (idx = 0) will have dummy entries from other, + * and some entries will have no pair. However every entry + * in other hists should have (dummy) pair. 
+ */ + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node_in); + + if (hist_entry__has_pairs(he)) { + if (!find_sample(fake_common_samples, + ARRAY_SIZE(fake_common_samples), + he->thread, he->ms.map, he->ms.sym) && + !find_sample(fake_samples[idx], + ARRAY_SIZE(fake_samples[idx]), + he->thread, he->ms.map, he->ms.sym)) { + count_dummy++; + } + count_pair++; + } else if (idx) { + pr_debug("A entry from the other hists should have pair\n"); + return -1; + } + + count++; + node = rb_next(node); + } + + /* + * Note that we have a entry collapsed in the other (idx = 1) hists. + */ + if (idx == 0) { + if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) { + pr_debug("Invalid count of dummy entries: %zd of %zd\n", + count_dummy, ARRAY_SIZE(fake_samples[1]) - 1); + return -1; + } + if (count != count_pair + ARRAY_SIZE(fake_samples[0])) { + pr_debug("Invalid count of total leader entries: %zd of %zd\n", + count, count_pair + ARRAY_SIZE(fake_samples[0])); + return -1; + } + } else { + if (count != count_pair) { + pr_debug("Invalid count of total other entries: %zd of %zd\n", + count, count_pair); + return -1; + } + if (count_dummy > 0) { + pr_debug("Other hists should not have dummy entries: %zd\n", + count_dummy); + return -1; + } + } + + return 0; +} + +static int validate_link(struct hists *leader, struct hists *other) +{ + return __validate_link(leader, 0) || __validate_link(other, 1); +} + +static void print_hists(struct hists *hists) +{ + int i = 0; + struct rb_root *root; + struct rb_node *node; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + pr_info("----- %s --------\n", __func__); + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node_in); + + pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n", + i, he->thread->comm, he->ms.map->dso->short_name, + he->ms.sym->name, he->stat.period); + + i++; + node = rb_next(node); + } +} + +int test__hists_link(void) +{ + int err = -1; + struct machine *machine = NULL; + struct perf_evsel *evsel, *first; + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + + if (evlist == NULL) + return -ENOMEM; + + err = parse_events(evlist, "cpu-clock", 0); + if (err) + goto out; + err = parse_events(evlist, "task-clock", 0); + if (err) + goto out; + + /* default sort order (comm,dso,sym) will be used */ + setup_sorting(NULL, NULL); + + /* setup threads/dso/map/symbols also */ + machine = setup_fake_machine(); + if (!machine) + goto out; + + if (verbose > 1) + machine__fprintf(machine, stderr); + + /* process sample events */ + err = add_hist_entries(evlist, machine); + if (err < 0) + goto out; + + list_for_each_entry(evsel, &evlist->entries, node) { + hists__collapse_resort(&evsel->hists); + + if (verbose > 2) + print_hists(&evsel->hists); + } + + first = perf_evlist__first(evlist); + evsel = perf_evlist__last(evlist); + + /* match common entries */ + hists__match(&first->hists, &evsel->hists); + err = validate_match(&first->hists, &evsel->hists); + if (err) + goto out; + + /* link common and/or dummy entries */ + hists__link(&first->hists, &evsel->hists); + err = validate_link(&first->hists, &evsel->hists); + if (err) + goto out; + + err = 0; + +out: + /* tear down everything */ + perf_evlist__delete(evlist); + + if (machine) { + machine__delete_threads(machine); + 
machine__delete(machine); + } + + return err; +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 0fd94657960e..0bf106978094 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -15,5 +15,6 @@ int test__pmu(void); int test__attr(void); int test__dso_data(void); int test__parse_events(void); +int test__hists_link(void); #endif /* TESTS_H */ -- cgit v1.2.3-59-g8ed1b From 68d20dc172df98a908c1a64fdf09745ded7ec935 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 12 Dec 2012 13:27:03 -0300 Subject: perf test: Remove leftover temp file left by one of the attr tests Instead of > /tmp/krava, direct it to /dev/null. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-oo4yhij2327u8ircz4d0y5p4@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr/test-record-group1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1 index 4d688a13fd2f..c5548d054aff 100644 --- a/tools/perf/tests/attr/test-record-group1 +++ b/tools/perf/tests/attr/test-record-group1 @@ -1,6 +1,6 @@ [config] command = record -args = -e '{cycles,instructions}' kill >/tmp/krava 2>&1 +args = -e '{cycles,instructions}' kill >/dev/null 2>&1 [event-1:base-record] fd=1 -- cgit v1.2.3-59-g8ed1b From ce90e3856ba55af4d3d9c9c1168cc624607f6c7c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 18 Dec 2012 14:14:24 -0300 Subject: perf tests: Adjust some message log levels to help diagnosing problems in attr tests Now we'll see the command being run and, if it fails, the fields that had unexpected values alongside the expected values, as in this example testing a problem in the next patch: # perf test -v 13 13: struct perf_event_attr setup : --- start --- SNIP running 'PERF_TEST_ATTR=/tmp/tmpDNIE6M /home/acme/bin/perf record -o /tmp/tmpDNIE6M/perf.data --group -e cycles,instructions kill >/dev/null 2>&1' ret 0 running 'PERF_TEST_ATTR=/tmp/tmpV5lKro /home/acme/bin/perf stat -o /tmp/tmpV5lKro/perf.data -dd kill >/dev/null 2>&1' ret 1 expected config=3, got 65540 expected exclude_guest=1, got 0 FAILED '/home/acme/libexec/perf-core/tests/attr/test-stat-detailed-2' - match failure ---- end ---- struct perf_event_attr setup: FAILED! # In the past the '-v' level would also show many more messages for the fields that matched, something better left to the '-vv' log level. The 'running' messages are kept so that we can see which tool tests succeeded and compare them to the one that failed, helping pinpoint the command line switch combo that leads to the problem.
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-9avmwxv5ipxyafwqxbk52ylg@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py index e702b82dcb86..2f629ca485bc 100644 --- a/tools/perf/tests/attr.py +++ b/tools/perf/tests/attr.py @@ -68,7 +68,7 @@ class Event(dict): self[key] = val def __init__(self, name, data, base): - log.info(" Event %s" % name); + log.debug(" Event %s" % name); self.name = name; self.group = '' self.add(base) @@ -97,6 +97,14 @@ class Event(dict): return False return True + def diff(self, other): + for t in Event.terms: + if not self.has_key(t) or not other.has_key(t): + continue + if not self.compare_data(self[t], other[t]): + log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) + + # Test file description needs to have following sections: # [config] # - just single instance in file @@ -113,7 +121,7 @@ class Test(object): parser = ConfigParser.SafeConfigParser() parser.read(path) - log.warning("running '%s'" % path) + log.debug("running '%s'" % path) self.path = path self.test_dir = options.test_dir @@ -128,7 +136,7 @@ class Test(object): self.expect = {} self.result = {} - log.info(" loading expected events"); + log.debug(" loading expected events"); self.load_events(path, self.expect) def is_event(self, name): @@ -164,7 +172,7 @@ class Test(object): self.perf, self.command, tempdir, self.args) ret = os.WEXITSTATUS(os.system(cmd)) - log.info(" running '%s' ret %d " % (cmd, ret)) + log.warning(" running '%s' ret %d " % (cmd, ret)) if ret != int(self.ret): raise Unsup(self) @@ -172,7 +180,7 @@ class Test(object): def compare(self, expect, result): match = {} - log.info(" compare"); + log.debug(" compare"); # For each expected event find all matching # events in result. Fail if there's not any. @@ -187,10 +195,11 @@ class Test(object): else: log.debug(" ->FAIL"); - log.info(" match: [%s] matches %s" % (exp_name, str(exp_list))) + log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) # we did not any matching event - fail if (not exp_list): + exp_event.diff(res_event) raise Fail(self, 'match failure'); match[exp_name] = exp_list @@ -208,10 +217,10 @@ class Test(object): if res_group not in match[group]: raise Fail(self, 'group failure') - log.info(" group: [%s] matches group leader %s" % + log.debug(" group: [%s] matches group leader %s" % (exp_name, str(match[group]))) - log.info(" matched") + log.debug(" matched") def resolve_groups(self, events): for name, event in events.items(): @@ -233,7 +242,7 @@ class Test(object): self.run_cmd(tempdir); # load events expectation for the test - log.info(" loading result events"); + log.debug(" loading result events"); for f in glob.glob(tempdir + '/event*'): self.load_events(f, self.result); -- cgit v1.2.3-59-g8ed1b From 594ac61ad3be9c80c738a9fe3bb95c05d8d1bae1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 13 Dec 2012 13:13:07 -0300 Subject: perf evsel: Do missing feature fallbacks in just one place Instead of doing it in stat, top, record or any other tool that opens event descriptors. 
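The consolidated logic boils down to a process-wide cache of the attr bits the running kernel rejects, retried on EINVAL. A minimal standalone sketch of that shape (open_counter_with_fallbacks() and missing_features are illustrative names only; in the patch below the real home is __perf_evsel__open() and the file-local perf_missing_features struct in evsel.c):

  #include <stdbool.h>
  #include <errno.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  /* sketch: remember which attr bits this kernel rejected with EINVAL */
  static struct {
          bool sample_id_all;
          bool exclude_guest;
  } missing_features;

  static int open_counter_with_fallbacks(struct perf_event_attr *attr, pid_t pid)
  {
          int fd;
  retry:
          /* strip whatever we already know this kernel cannot take */
          if (missing_features.exclude_guest)
                  attr->exclude_guest = attr->exclude_host = 0;
          if (missing_features.sample_id_all)
                  attr->sample_id_all = 0;

          fd = syscall(__NR_perf_event_open, attr, pid, -1, -1, 0);
          if (fd >= 0 || errno != EINVAL)
                  return fd;

          if (!missing_features.exclude_guest &&
              (attr->exclude_guest || attr->exclude_host)) {
                  missing_features.exclude_guest = true;
                  goto retry;
          }
          if (!missing_features.sample_id_all) {
                  missing_features.sample_id_all = true;
                  goto retry;
          }
          return fd;      /* a genuine EINVAL, give up */
  }

Every tool then shares the fallback state for the lifetime of the process, instead of each builtin keeping its own per-invocation flags.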
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-vr8hzph83d5t2mdlkf565h84@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 36 ------------------------------------ tools/perf/builtin-stat.c | 30 +++--------------------------- tools/perf/builtin-top.c | 20 -------------------- tools/perf/perf.h | 2 -- tools/perf/util/evsel.c | 31 ++++++++++++++++++++++++++++--- tools/perf/util/top.h | 2 -- 6 files changed, 31 insertions(+), 90 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 028de726b832..a4b97269cfc6 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -234,25 +234,6 @@ static int perf_record__open(struct perf_record *rec) list_for_each_entry(pos, &evlist->entries, node) { struct perf_event_attr *attr = &pos->attr; - /* - * Check if parse_single_tracepoint_event has already asked for - * PERF_SAMPLE_TIME. - * - * XXX this is kludgy but short term fix for problems introduced by - * eac23d1c that broke 'perf script' by having different sample_types - * when using multiple tracepoint events when we use a perf binary - * that tries to use sample_id_all on an older kernel. - * - * We need to move counter creation to perf_session, support - * different sample_types, etc. - */ - bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; - -fallback_missing_features: - if (opts->exclude_guest_missing) - attr->exclude_guest = attr->exclude_host = 0; -retry_sample_id: - attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; try_again: if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { int err = errno; @@ -266,23 +247,6 @@ try_again: " an out-of-range profile CPU?\n"); rc = -err; goto out; - } else if (err == EINVAL) { - if (!opts->exclude_guest_missing && - (attr->exclude_guest || attr->exclude_host)) { - pr_debug("Old kernel, cannot exclude " - "guest or host samples.\n"); - opts->exclude_guest_missing = true; - goto fallback_missing_features; - } else if (!opts->sample_id_all_missing) { - /* - * Old kernel, no attr->sample_id_type_all field - */ - opts->sample_id_all_missing = true; - if (!opts->sample_time && !opts->raw_samples && !time_needed) - perf_evsel__reset_sample_bit(pos, TIME); - - goto retry_sample_id; - } } /* diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c12655af2b88..ef067c193f93 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -132,8 +132,6 @@ static struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) { struct perf_event_attr *attr = &evsel->attr; - bool exclude_guest_missing = false; - int ret; if (scale) attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | @@ -141,16 +139,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) attr->inherit = !no_inherit; -retry: - if (exclude_guest_missing) - evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; - - if (perf_target__has_cpu(&target)) { - ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); - if (ret) - goto check_ret; - return 0; - } + if (perf_target__has_cpu(&target)) + return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); if (!perf_target__has_task(&target) && perf_evsel__is_group_leader(evsel)) { @@ -158,21 +148,7 @@ retry: attr->enable_on_exec = 1; } - ret = perf_evsel__open_per_thread(evsel, evsel_list->threads); - if (!ret) - return 0; - /* 
fall through */ -check_ret: - if (ret && errno == EINVAL) { - if (!exclude_guest_missing && - (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { - pr_debug("Old kernel, cannot exclude " - "guest or host samples.\n"); - exclude_guest_missing = true; - goto retry; - } - } - return ret; + return perf_evsel__open_per_thread(evsel, evsel_list->threads); } /* diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b7d2ea62dbc6..74fca619fc4e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -900,12 +900,6 @@ static void perf_top__start_counters(struct perf_top *top) list_for_each_entry(counter, &evlist->entries, node) { struct perf_event_attr *attr = &counter->attr; - -fallback_missing_features: - if (top->exclude_guest_missing) - attr->exclude_guest = attr->exclude_host = 0; -retry_sample_id: - attr->sample_id_all = top->sample_id_all_missing ? 0 : 1; try_again: if (perf_evsel__open(counter, top->evlist->cpus, top->evlist->threads) < 0) { @@ -914,20 +908,6 @@ try_again: if (err == EPERM || err == EACCES) { ui__error_paranoid(); goto out_err; - } else if (err == EINVAL) { - if (!top->exclude_guest_missing && - (attr->exclude_guest || attr->exclude_host)) { - pr_debug("Old kernel, cannot exclude " - "guest or host samples.\n"); - top->exclude_guest_missing = true; - goto fallback_missing_features; - } else if (!top->sample_id_all_missing) { - /* - * Old kernel, no attr->sample_id_type_all field - */ - top->sample_id_all_missing = true; - goto retry_sample_id; - } } /* * If it's cycles then fall back to hrtimer diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 2c340e7da458..7622f15bcfb7 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -237,8 +237,6 @@ struct perf_record_opts { bool raw_samples; bool sample_address; bool sample_time; - bool sample_id_all_missing; - bool exclude_guest_missing; bool period; unsigned int freq; unsigned int mmap_pages; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 7a2a3dc3ff03..ee6ee3f45c2a 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -22,6 +22,11 @@ #include #include "perf_regs.h" +static struct { + bool sample_id_all; + bool exclude_guest; +} perf_missing_features; + #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) static int __perf_evsel__sample_size(u64 sample_type) @@ -463,7 +468,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_event_attr *attr = &evsel->attr; int track = !evsel->idx; /* only the first counter needs these */ - attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; + attr->sample_id_all = perf_missing_features.sample_id_all ? 
0 : 1; attr->inherit = !opts->no_inherit; perf_evsel__set_sample_bit(evsel, IP); @@ -513,7 +518,7 @@ void perf_evsel__config(struct perf_evsel *evsel, if (opts->period) perf_evsel__set_sample_bit(evsel, PERIOD); - if (!opts->sample_id_all_missing && + if (!perf_missing_features.sample_id_all && (opts->sample_time || !opts->no_inherit || perf_target__has_cpu(&opts->target))) perf_evsel__set_sample_bit(evsel, TIME); @@ -761,6 +766,13 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, pid = evsel->cgrp->fd; } +fallback_missing_features: + if (perf_missing_features.exclude_guest) + evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; +retry_sample_id: + if (perf_missing_features.sample_id_all) + evsel->attr.sample_id_all = 0; + for (cpu = 0; cpu < cpus->nr; cpu++) { for (thread = 0; thread < threads->nr; thread++) { @@ -777,13 +789,26 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, group_fd, flags); if (FD(evsel, cpu, thread) < 0) { err = -errno; - goto out_close; + goto try_fallback; } } } return 0; +try_fallback: + if (err != -EINVAL || cpu > 0 || thread > 0) + goto out_close; + + if (!perf_missing_features.exclude_guest && + (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { + perf_missing_features.exclude_guest = true; + goto fallback_missing_features; + } else if (!perf_missing_features.sample_id_all) { + perf_missing_features.sample_id_all = true; + goto retry_sample_id; + } + out_close: do { while (--thread >= 0) { diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 927c229c2d9a..7ebf357dc9e1 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -29,8 +29,6 @@ struct perf_top { bool sort_has_symbols; bool kptr_restrict_warned; bool vmlinux_warned; - bool sample_id_all_missing; - bool exclude_guest_missing; bool dump_symtab; struct hist_entry *sym_filter_entry; struct perf_evsel *sym_evsel; -- cgit v1.2.3-59-g8ed1b From c0a54341c0e89333ef201fc3f3001176962f6121 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 13 Dec 2012 14:16:30 -0300 Subject: perf evsel: Introduce event fallback method The only fallback right now is for HW cpu-cycles -> SW cpu-clock, that was done in the same way in both 'top' and 'record'. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-58l1mgibh9oa9m0pd3fasxa5@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 23 +++-------------------- tools/perf/builtin-top.c | 20 +++----------------- tools/perf/util/evsel.c | 28 ++++++++++++++++++++++++++++ tools/perf/util/evsel.h | 3 +++ 4 files changed, 37 insertions(+), 37 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a4b97269cfc6..b8d0a39b484e 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -224,6 +224,7 @@ static bool perf_evlist__equal(struct perf_evlist *evlist, static int perf_record__open(struct perf_record *rec) { + char msg[128]; struct perf_evsel *pos; struct perf_evlist *evlist = rec->evlist; struct perf_session *session = rec->session; @@ -249,27 +250,9 @@ try_again: goto out; } - /* - * If it's cycles then fall back to hrtimer - * based cpu-clock-tick sw counter, which - * is always available even if no PMU support. - * - * PPC returns ENXIO until 2.6.37 (behavior changed - * with commit b0a873e). 
- */ - if ((err == ENOENT || err == ENXIO) - && attr->type == PERF_TYPE_HARDWARE - && attr->config == PERF_COUNT_HW_CPU_CYCLES) { - + if (perf_evsel__fallback(pos, err, msg, sizeof(msg))) { if (verbose) - ui__warning("The cycles event is not supported, " - "trying to fall back to cpu-clock-ticks\n"); - attr->type = PERF_TYPE_SOFTWARE; - attr->config = PERF_COUNT_SW_CPU_CLOCK; - if (pos->name) { - free(pos->name); - pos->name = NULL; - } + ui__warning("%s\n", msg); goto try_again; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 74fca619fc4e..8d41d0b58956 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -892,6 +892,7 @@ static void perf_top__mmap_read(struct perf_top *top) static void perf_top__start_counters(struct perf_top *top) { + char msg[128]; struct perf_evsel *counter; struct perf_evlist *evlist = top->evlist; struct perf_record_opts *opts = &top->record_opts; @@ -909,25 +910,10 @@ try_again: ui__error_paranoid(); goto out_err; } - /* - * If it's cycles then fall back to hrtimer - * based cpu-clock-tick sw counter, which - * is always available even if no PMU support: - */ - if ((err == ENOENT || err == ENXIO) && - (attr->type == PERF_TYPE_HARDWARE) && - (attr->config == PERF_COUNT_HW_CPU_CYCLES)) { + if (perf_evsel__fallback(counter, err, msg, sizeof(msg))) { if (verbose) - ui__warning("Cycles event not supported,\n" - "trying to fall back to cpu-clock-ticks\n"); - - attr->type = PERF_TYPE_SOFTWARE; - attr->config = PERF_COUNT_SW_CPU_CLOCK; - if (counter->name) { - free(counter->name); - counter->name = NULL; - } + ui__warning("%s\n", msg); goto try_again; } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index ee6ee3f45c2a..0c88e5c12dab 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1378,3 +1378,31 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, fputc('\n', fp); return ++printed; } + +bool perf_evsel__fallback(struct perf_evsel *evsel, int err, + char *msg, size_t msgsize) +{ + if ((err == ENOENT || err == ENXIO) && + evsel->attr.type == PERF_TYPE_HARDWARE && + evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { + /* + * If it's cycles then fall back to hrtimer based + * cpu-clock-tick sw counter, which is always available even if + * no PMU support. + * + * PPC returns ENXIO until 2.6.37 (behavior changed with commit + * b0a873e). + */ + scnprintf(msg, msgsize, "%s", +"The cycles event is not supported, trying to fall back to cpu-clock-ticks"); + + evsel->attr.type = PERF_TYPE_SOFTWARE; + evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; + + free(evsel->name); + evsel->name = NULL; + return true; + } + + return false; +} diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 9cb8a0215711..26d8cfd86e0a 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -251,4 +251,7 @@ struct perf_attr_details { int perf_evsel__fprintf(struct perf_evsel *evsel, struct perf_attr_details *details, FILE *fp); + +bool perf_evsel__fallback(struct perf_evsel *evsel, int err, + char *msg, size_t msgsize); #endif /* __PERF_EVSEL_H */ -- cgit v1.2.3-59-g8ed1b From 56e52e85366717481cde16b3480f015c7eb32ba3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 13 Dec 2012 15:10:58 -0300 Subject: perf evsel: Introduce perf_evsel__open_strerror method That consolidates the error messages in 'record', 'stat' and 'top', that now get a consistent set of messages and allow other tools to use the new method to report problems using whatever UI toolkit. 
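With the helpers in place, every open loop collapses to the same shape; the following is condensed from the builtin-record hunk below (declarations, the out label and the rest of the function are elided, so treat it as a sketch rather than the verbatim code):

  char msg[512];
  int rc = 0;

  list_for_each_entry(pos, &evlist->entries, node) {
  try_again:
          if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
                  /* first chance: tweak the attr for old kernels and retry */
                  if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
                          if (verbose)
                                  ui__warning("%s\n", msg);
                          goto try_again;
                  }
                  /* no fallback left: turn errno into one readable message */
                  rc = -errno;
                  perf_evsel__open_strerror(pos, &opts->target, errno,
                                            msg, sizeof(msg));
                  ui__error("%s\n", msg);
                  goto out;
          }
  }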
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-1cudb7wl996kz7ilz83ctvhr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 54 +++++---------------------------------------- tools/perf/builtin-stat.c | 16 +++++--------- tools/perf/builtin-top.c | 34 +++++----------------------- tools/perf/ui/util.c | 11 --------- tools/perf/util/debug.h | 1 - tools/perf/util/evsel.c | 49 ++++++++++++++++++++++++++++++++++++++++ tools/perf/util/evsel.h | 3 +++ 7 files changed, 68 insertions(+), 100 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b8d0a39b484e..de60dd4ee5d3 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -224,7 +224,7 @@ static bool perf_evlist__equal(struct perf_evlist *evlist, static int perf_record__open(struct perf_record *rec) { - char msg[128]; + char msg[512]; struct perf_evsel *pos; struct perf_evlist *evlist = rec->evlist; struct perf_session *session = rec->session; @@ -234,60 +234,18 @@ static int perf_record__open(struct perf_record *rec) perf_evlist__config(evlist, opts); list_for_each_entry(pos, &evlist->entries, node) { - struct perf_event_attr *attr = &pos->attr; try_again: if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { - int err = errno; - - if (err == EPERM || err == EACCES) { - ui__error_paranoid(); - rc = -err; - goto out; - } else if (err == ENODEV && opts->target.cpu_list) { - pr_err("No such device - did you specify" - " an out-of-range profile CPU?\n"); - rc = -err; - goto out; - } - - if (perf_evsel__fallback(pos, err, msg, sizeof(msg))) { + if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) { if (verbose) ui__warning("%s\n", msg); goto try_again; } - if (err == ENOENT) { - ui__error("The %s event is not supported.\n", - perf_evsel__name(pos)); - rc = -err; - goto out; - } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) { - ui__error("\'precise\' request may not be supported. " - "Try removing 'p' modifier\n"); - rc = -err; - goto out; - } - - printf("\n"); - error("sys_perf_event_open() syscall returned with %d " - "(%s) for event %s. /bin/dmesg may provide " - "additional information.\n", - err, strerror(err), perf_evsel__name(pos)); - -#if defined(__i386__) || defined(__x86_64__) - if (attr->type == PERF_TYPE_HARDWARE && - err == EOPNOTSUPP) { - pr_err("No hardware sampling interrupt available." - " No APIC? 
If so then you can boot the kernel" - " with the \"lapic\" boot parameter to" - " force-enable it.\n"); - rc = -err; - goto out; - } -#endif - - pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); - rc = -err; + rc = -errno; + perf_evsel__open_strerror(pos, &opts->target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); goto out; } } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index ef067c193f93..1c2ac148a7d5 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -247,6 +247,7 @@ static int read_counter(struct perf_evsel *counter) static int __run_perf_stat(int argc __maybe_unused, const char **argv) { + char msg[512]; unsigned long long t0, t1; struct perf_evsel *counter; int status = 0; @@ -324,20 +325,13 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv) continue; } - if (errno == EPERM || errno == EACCES) { - error("You may not have permission to collect %sstats.\n" - "\t Consider tweaking" - " /proc/sys/kernel/perf_event_paranoid or running as root.", - target.system_wide ? "system-wide " : ""); - } else { - error("open_counter returned with %d (%s). " - "/bin/dmesg may provide additional information.\n", - errno, strerror(errno)); - } + perf_evsel__open_strerror(counter, &target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); + if (child_pid != -1) kill(child_pid, SIGTERM); - pr_err("Not all events could be opened.\n"); return -1; } counter->supported = true; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 8d41d0b58956..f5fd260f7b20 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -892,7 +892,7 @@ static void perf_top__mmap_read(struct perf_top *top) static void perf_top__start_counters(struct perf_top *top) { - char msg[128]; + char msg[512]; struct perf_evsel *counter; struct perf_evlist *evlist = top->evlist; struct perf_record_opts *opts = &top->record_opts; @@ -900,42 +900,18 @@ static void perf_top__start_counters(struct perf_top *top) perf_evlist__config(evlist, opts); list_for_each_entry(counter, &evlist->entries, node) { - struct perf_event_attr *attr = &counter->attr; try_again: if (perf_evsel__open(counter, top->evlist->cpus, top->evlist->threads) < 0) { - int err = errno; - - if (err == EPERM || err == EACCES) { - ui__error_paranoid(); - goto out_err; - } - - if (perf_evsel__fallback(counter, err, msg, sizeof(msg))) { + if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { if (verbose) ui__warning("%s\n", msg); goto try_again; } - if (err == ENOENT) { - ui__error("The %s event is not supported.\n", - perf_evsel__name(counter)); - goto out_err; - } else if (err == EMFILE) { - ui__error("Too many events are opened.\n" - "Try again after reducing the number of events\n"); - goto out_err; - } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) { - ui__error("\'precise\' request may not be supported. " - "Try removing 'p' modifier\n"); - goto out_err; - } - - ui__error("The sys_perf_event_open() syscall " - "returned with %d (%s). /bin/dmesg " - "may provide additional information.\n" - "No CONFIG_PERF_EVENTS=y kernel support " - "configured?\n", err, strerror(err)); + perf_evsel__open_strerror(counter, &opts->target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); goto out_err; } } diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c index 3014a7cd5271..e3e0a963d03a 100644 --- a/tools/perf/ui/util.c +++ b/tools/perf/ui/util.c @@ -52,17 +52,6 @@ int ui__warning(const char *format, ...) 
return ret; } -int ui__error_paranoid(void) -{ - return ui__error("Permission error - are you root?\n" - "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" - " -1 - Not paranoid at all\n" - " 0 - Disallow raw tracepoint access for unpriv\n" - " 1 - Disallow cpu events for unpriv\n" - " 2 - Disallow kernel profiling for unpriv\n"); -} - - /** * perf_error__register - Register error logging functions * @eops: The pointer to error logging function struct diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 6e2667fb8211..efbd98805ad0 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -16,6 +16,5 @@ void trace_event(union perf_event *event); int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); -int ui__error_paranoid(void); #endif /* __PERF_DEBUG_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 0c88e5c12dab..e45332d08a58 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1406,3 +1406,52 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err, return false; } + +int perf_evsel__open_strerror(struct perf_evsel *evsel, + struct perf_target *target, + int err, char *msg, size_t size) +{ + switch (err) { + case EPERM: + case EACCES: + return scnprintf(msg, size, "%s", + "You may not have permission to collect %sstats.\n" + "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" + " -1 - Not paranoid at all\n" + " 0 - Disallow raw tracepoint access for unpriv\n" + " 1 - Disallow cpu events for unpriv\n" + " 2 - Disallow kernel profiling for unpriv", + target->system_wide ? "system-wide " : ""); + case ENOENT: + return scnprintf(msg, size, "The %s event is not supported.", + perf_evsel__name(evsel)); + case EMFILE: + return scnprintf(msg, size, "%s", + "Too many events are opened.\n" + "Try again after reducing the number of events."); + case ENODEV: + if (target->cpu_list) + return scnprintf(msg, size, "%s", + "No such device - did you specify an out-of-range profile CPU?\n"); + break; + case EOPNOTSUPP: + if (evsel->attr.precise_ip) + return scnprintf(msg, size, "%s", + "\'precise\' request may not be supported. Try removing 'p' modifier."); +#if defined(__i386__) || defined(__x86_64__) + if (evsel->attr.type == PERF_TYPE_HARDWARE) + return scnprintf(msg, size, "%s", + "No hardware sampling interrupt available.\n" + "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); +#endif + break; + default: + break; + } + + return scnprintf(msg, size, + "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). 
\n" + "/bin/dmesg may provide additional information.\n" + "No CONFIG_PERF_EVENTS=y kernel support configured?\n", + err, strerror(err), perf_evsel__name(evsel)); +} diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 26d8cfd86e0a..c68d1b82e843 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -254,4 +254,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, bool perf_evsel__fallback(struct perf_evsel *evsel, int err, char *msg, size_t msgsize); +int perf_evsel__open_strerror(struct perf_evsel *evsel, + struct perf_target *target, + int err, char *msg, size_t size); #endif /* __PERF_EVSEL_H */ -- cgit v1.2.3-59-g8ed1b From 54359d3340241b4cf53e42ecd5c429d7ba6fbef3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 14 Dec 2012 13:06:13 -0300 Subject: perf test: Check for linking problems in the python binding It just will add the O= builddir to PYTHONPATH and try to 'use perf', which will, in verbose mode show the python backtrace with the missing symbols, such as in the problem fixed in the patch after this one: # perf test -v 15 15: Try 'use perf' in python, checking link problems : --- start --- Traceback (most recent call last): File "", line 1, in ImportError: /home/acme/git/build/perf//python/perf.so: undefined symbol: test_attr__enabled ---- end ---- Try 'use perf' in python, checking link problems: FAILED! # Loooong overdue, done. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-zmd2oq9gz6t1u145ub7qm2nv@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 4 ++++ tools/perf/tests/builtin-test.c | 4 ++++ tools/perf/tests/python-use.c | 23 +++++++++++++++++++++++ tools/perf/tests/tests.h | 1 + 4 files changed, 32 insertions(+) create mode 100644 tools/perf/tests/python-use.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index c9f72b18e787..d0d79574403a 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -488,6 +488,7 @@ LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o LIB_OBJS += $(OUTPUT)tests/pmu.o LIB_OBJS += $(OUTPUT)tests/hists_link.o +LIB_OBJS += $(OUTPUT)tests/python-use.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o @@ -955,6 +956,9 @@ $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS '-DBINDIR="$(bindir_SQ)"' \ $< +$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DPYTHONPATH='"$(OUTPUT)/python"' $< + $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 479d10484a74..a164e4cd5f42 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -72,6 +72,10 @@ static struct test { .desc = "Test matching and linking mutliple hists", .func = test__hists_link, }, + { + .desc = "Try 'use perf' in python, checking link problems", + .func = test__python_use, + }, { .func = NULL, }, diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c new file mode 100644 index 000000000000..15301f4ac3f1 --- /dev/null +++ b/tools/perf/tests/python-use.c @@ -0,0 +1,23 @@ +/* + * Just test if we can load the python binding. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include "tests.h" + +extern int verbose; + +int test__python_use(void) +{ + char *cmd; + int ret; + + if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | python %s", + PYTHONPATH, verbose ? "" : "2> /dev/null") < 0) + return -1; + + ret = system(cmd) ? -1 : 0; + free(cmd); + return ret; +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 0bf106978094..0ded425b17d6 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -16,5 +16,6 @@ int test__attr(void); int test__dso_data(void); int test__parse_events(void); int test__hists_link(void); +int test__python_use(void); #endif /* TESTS_H */ -- cgit v1.2.3-59-g8ed1b From 0c6332e9d8b8f38a9d5f0cdd66c5575e76ad8995 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 13 Dec 2012 16:43:04 -0300 Subject: perf python: Fix breakage introduced by the test_attr infrastructure The test_attr infrastructure hooks on the sys_perf_event_open call, checking if a variable is set and if so calling a function to intercept calls and do the checking. But both the variable and the function aren't on objects that are linked on the python binding, breaking it: # perf test -v 15 15: Try 'use perf' in python, checking link problems : --- start --- Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: /home/acme/git/build/perf//python/perf.so: undefined symbol: test_attr__enabled ---- end ---- Try 'use perf' in python, checking link problems: FAILED! # Fix it by moving the variable to one of the linked object files and providing a stub for the function in the python.o object, that is only linked in the python binding. Now 'perf test' is happy again: # perf test 15 15: Try 'use perf' in python, checking link problems : Ok # Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-0rsca2kn44b38rgdpr3tz6n5@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr.c | 2 -- tools/perf/util/python.c | 9 +++++++++ tools/perf/util/util.c | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 25638a986257..05b5acbabe5d 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -33,8 +33,6 @@ extern int verbose; -bool test_attr__enabled; - static char *dir; void test_attr__init(void) diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index a2657fd96837..925e0c3e6d91 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -1045,3 +1045,12 @@ error: if (PyErr_Occurred()) PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); } + +/* + * Dummy, to avoid dragging all the test_attr infrastructure in the python + * binding.
+ */ +void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, + int fd, int group_fd, unsigned long flags) +{ +} diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 5906e8426cc7..252b889ac8c2 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -12,6 +12,8 @@ */ unsigned int page_size; +bool test_attr__enabled; + bool perf_host = true; bool perf_guest = false; -- cgit v1.2.3-59-g8ed1b From 0bd3f0840bf4bc900b4beb8e792ad499b43dad3b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 17 Dec 2012 14:08:36 +0100 Subject: perf tools: Add missing closedir in multi tracepoint processing We don't close 'events' directory when reading multiple tracepoint events. Adding missing closedir. Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1355749718-4355-2-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/parse-events.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 2d8d53bec17e..d64ff299a3b3 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -408,6 +408,7 @@ static int add_tracepoint_multi(struct list_head **list, int *idx, ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); } + closedir(evt_dir); return ret; } -- cgit v1.2.3-59-g8ed1b From f35488f97b4b49cb76d87bb7e8da9e93fc70b4e9 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 17 Dec 2012 14:08:37 +0100 Subject: perf tools: Add support for wildcard in tracepoint system name Adding support for wildcards '*?" in the tracepoint system part. It's now possible to open all available tracepoints like: # perf stat -e '*:*' ls You might need to increase limit for open files via ulimit. If ftrace events tracepoints are configured in, the record command fails on above event selection because of them. The stat command disables counters that fails to open, the record command fails completely. We probably want to be smarter here. Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1355749718-4355-3-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/parse-events.c | 51 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index d64ff299a3b3..626c120f00da 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -380,8 +380,8 @@ static int add_tracepoint(struct list_head **listp, int *idx, return 0; } -static int add_tracepoint_multi(struct list_head **list, int *idx, - char *sys_name, char *evt_name) +static int add_tracepoint_multi_event(struct list_head **list, int *idx, + char *sys_name, char *evt_name) { char evt_path[MAXPATHLEN]; struct dirent *evt_ent; @@ -412,6 +412,46 @@ static int add_tracepoint_multi(struct list_head **list, int *idx, return ret; } +static int add_tracepoint_event(struct list_head **list, int *idx, + char *sys_name, char *evt_name) +{ + return strpbrk(evt_name, "*?") ? 
+ add_tracepoint_multi_event(list, idx, sys_name, evt_name) : + add_tracepoint(list, idx, sys_name, evt_name); +} + +static int add_tracepoint_multi_sys(struct list_head **list, int *idx, + char *sys_name, char *evt_name) +{ + struct dirent *events_ent; + DIR *events_dir; + int ret = 0; + + events_dir = opendir(tracing_events_path); + if (!events_dir) { + perror("Can't open event dir"); + return -1; + } + + while (!ret && (events_ent = readdir(events_dir))) { + if (!strcmp(events_ent->d_name, ".") + || !strcmp(events_ent->d_name, "..") + || !strcmp(events_ent->d_name, "enable") + || !strcmp(events_ent->d_name, "header_event") + || !strcmp(events_ent->d_name, "header_page")) + continue; + + if (!strglobmatch(events_ent->d_name, sys_name)) + continue; + + ret = add_tracepoint_event(list, idx, events_ent->d_name, + evt_name); + } + + closedir(events_dir); + return ret; +} + int parse_events_add_tracepoint(struct list_head **list, int *idx, char *sys, char *event) { @@ -421,9 +461,10 @@ int parse_events_add_tracepoint(struct list_head **list, int *idx, if (ret) return ret; - return strpbrk(event, "*?") ? - add_tracepoint_multi(list, idx, sys, event) : - add_tracepoint(list, idx, sys, event); + if (strpbrk(sys, "*?")) + return add_tracepoint_multi_sys(list, idx, sys, event); + else + return add_tracepoint_event(list, idx, sys, event); } static int -- cgit v1.2.3-59-g8ed1b From 82ce75d93335f7079afc17fb7f2a4e549d2fbecb Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 17 Dec 2012 14:08:38 +0100 Subject: perf tests: Add event parsing test for '*:*' tracepoints Adding event parsing test for '*:*' tracepoints. Checking the count matches all the tracepoints available plus current standard tracepoint perf_event_attr check. This test exposes warnings from traceevent lib about not being able to parse some tracepoints' format data. 
Exposing these messages in the automated test suite will probably speed up the fix ;-) Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1355749718-4355-4-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 62 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 294ffddfbf42..e7eb708da32c 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -3,6 +3,7 @@ #include "evsel.h" #include "evlist.h" #include "sysfs.h" +#include "debugfs.h" #include "tests.h" #include @@ -782,6 +783,63 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) return 0; } +static int count_tracepoints(void) +{ + char events_path[PATH_MAX]; + struct dirent *events_ent; + DIR *events_dir; + int cnt = 0; + + scnprintf(events_path, PATH_MAX, "%s/tracing/events", + debugfs_find_mountpoint()); + + events_dir = opendir(events_path); + + TEST_ASSERT_VAL("Can't open events dir", events_dir); + + while ((events_ent = readdir(events_dir))) { + char sys_path[PATH_MAX]; + struct dirent *sys_ent; + DIR *sys_dir; + + if (!strcmp(events_ent->d_name, ".") + || !strcmp(events_ent->d_name, "..") + || !strcmp(events_ent->d_name, "enable") + || !strcmp(events_ent->d_name, "header_event") + || !strcmp(events_ent->d_name, "header_page")) + continue; + + scnprintf(sys_path, PATH_MAX, "%s/%s", + events_path, events_ent->d_name); + + sys_dir = opendir(sys_path); + TEST_ASSERT_VAL("Can't open sys dir", sys_dir); + + while ((sys_ent = readdir(sys_dir))) { + if (!strcmp(sys_ent->d_name, ".") + || !strcmp(sys_ent->d_name, "..") + || !strcmp(sys_ent->d_name, "enable") + || !strcmp(sys_ent->d_name, "filter")) + continue; + + cnt++; + } + + closedir(sys_dir); + } + + closedir(events_dir); + return cnt; +} + +static int test__all_tracepoints(struct perf_evlist *evlist) +{ + TEST_ASSERT_VAL("wrong events count", + count_tracepoints() == evlist->nr_entries); + + return test__checkevent_tracepoint_multi(evlist); +} + struct test__event_st { const char *name; __u32 type; @@ -921,6 +979,10 @@ static struct test__event_st test__events[] = { .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", .check = test__group5, }, + [33] = { + .name = "*:*", + .check = test__all_tracepoints, + }, }; static struct test__event_st test__events_pmu[] = { -- cgit v1.2.3-59-g8ed1b From 000ae33fdb5ff7bc7ae985b43e6278024a4985c2 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 17 Dec 2012 16:25:01 +0900 Subject: perf tests: Check python path on attr and binding test Current perf test code tries to execute python version 2 in order to test attributes on perf_event_open syscall. However it's no longer the default python version; a system can have python v3 only, or v2 under a different name (e.g. python2). So if there's no such python interpreter with the name 'python', the test would fail like this (yes, it's happened on my new archlinux laptop :). 13: struct perf_event_attr setup :sh: python: command not found FAILED! As we can pass the name of the python interpreter to make, use it for the attr test also.
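The build-time define works through C adjacent string literal concatenation: once PYTHON expands to a quoted interpreter name, it simply becomes the first word of the command handed to system(), as the Makefile hunk below does via -DPYTHON. A trivial standalone illustration (the python2 default here is only an assumption for the example):

  #include <stdio.h>

  #ifndef PYTHON
  #define PYTHON "python2"        /* assumed fallback, normally -DPYTHON='"..."' */
  #endif

  int main(void)
  {
          char cmd[256];

          /* PYTHON " %s/attr.py" fuses into a single literal at compile time */
          snprintf(cmd, sizeof(cmd), PYTHON " %s/attr.py -d %s/attr/", ".", ".");
          puts(cmd);              /* prints: python2 ./attr.py -d ./attr/ */
          return 0;
  }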
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1355729101-31317-1-git-send-email-namhyung@kernel.org [ committer note: Added the same mechanism to the python binding test ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 7 +++++-- tools/perf/tests/attr.c | 2 +- tools/perf/tests/python-use.c | 4 ++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d0d79574403a..3158f457b6ae 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -953,11 +953,14 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ - '-DBINDIR="$(bindir_SQ)"' \ + '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \ $< $(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DPYTHONPATH='"$(OUTPUT)/python"' $< + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ + -DPYTHONPATH='"$(OUTPUT)/python"' \ + -DPYTHON='"$(PYTHON_WORD)"' \ + $< $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 05b5acbabe5d..f61dd3fb546b 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -144,7 +144,7 @@ static int run_dir(const char *d, const char *perf) { char cmd[3*PATH_MAX]; - snprintf(cmd, 3*PATH_MAX, "python %s/attr.py -d %s/attr/ -p %s %s", + snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %s", d, d, perf, verbose ? "-v" : ""); return system(cmd); diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c index 15301f4ac3f1..7760277c6def 100644 --- a/tools/perf/tests/python-use.c +++ b/tools/perf/tests/python-use.c @@ -13,8 +13,8 @@ int test__python_use(void) char *cmd; int ret; - if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | python %s", - PYTHONPATH, verbose ? "" : "2> /dev/null") < 0) + if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s", + PYTHONPATH, PYTHON, verbose ? "" : "2> /dev/null") < 0) return -1; ret = system(cmd) ? -1 : 0; -- cgit v1.2.3-59-g8ed1b From 5323f60c7578e9ddc92d1ca8a2d7b08284624cd1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 17 Dec 2012 15:38:54 +0900 Subject: perf header: Ensure read/write finished successfully Use readn instead of read and check return value of do_write. 
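The distinction matters because a plain read() may legally return fewer bytes than requested (pipes, signals, short files), silently truncating header data. A sketch of the short-read-safe shape that readn() provides (perf's actual helper may differ in details):

  #include <errno.h>
  #include <unistd.h>

  static ssize_t readn_sketch(int fd, void *buf, size_t n)
  {
          char *p = buf;
          size_t left = n;

          while (left) {
                  ssize_t ret = read(fd, p, left);

                  if (ret < 0 && errno == EINTR)
                          continue;       /* interrupted, just retry */
                  if (ret <= 0)
                          return ret;     /* real error or premature EOF */
                  left -= ret;
                  p += ret;
          }
          return n;       /* the full n bytes arrived */
  }

Callers can then compare the result against sizeof() directly, which is exactly what the hunks below do.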
Signed-off-by: Namhyung Kim Suggested-by: Arnaldo Carvalho de Melo Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1355726345-29553-4-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 63 +++++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index b7da4634a047..bb578d2d10f9 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -148,7 +148,7 @@ static char *do_read_string(int fd, struct perf_header *ph) u32 len; char *buf; - sz = read(fd, &len, sizeof(len)); + sz = readn(fd, &len, sizeof(len)); if (sz < (ssize_t)sizeof(len)) return NULL; @@ -159,7 +159,7 @@ static char *do_read_string(int fd, struct perf_header *ph) if (!buf) return NULL; - ret = read(fd, buf, len); + ret = readn(fd, buf, len); if (ret == (ssize_t)len) { /* * strings are padded by zeroes @@ -1051,16 +1051,25 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, struct perf_pmu *pmu = NULL; off_t offset = lseek(fd, 0, SEEK_CUR); __u32 pmu_num = 0; + int ret; /* write real pmu_num later */ - do_write(fd, &pmu_num, sizeof(pmu_num)); + ret = do_write(fd, &pmu_num, sizeof(pmu_num)); + if (ret < 0) + return ret; while ((pmu = perf_pmu__scan(pmu))) { if (!pmu->name) continue; pmu_num++; - do_write(fd, &pmu->type, sizeof(pmu->type)); - do_write_string(fd, pmu->name); + + ret = do_write(fd, &pmu->type, sizeof(pmu->type)); + if (ret < 0) + return ret; + + ret = do_write_string(fd, pmu->name); + if (ret < 0) + return ret; } if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { @@ -1209,14 +1218,14 @@ read_event_desc(struct perf_header *ph, int fd) size_t msz; /* number of events */ - ret = read(fd, &nre, sizeof(nre)); + ret = readn(fd, &nre, sizeof(nre)); if (ret != (ssize_t)sizeof(nre)) goto error; if (ph->needs_swap) nre = bswap_32(nre); - ret = read(fd, &sz, sizeof(sz)); + ret = readn(fd, &sz, sizeof(sz)); if (ret != (ssize_t)sizeof(sz)) goto error; @@ -1244,7 +1253,7 @@ read_event_desc(struct perf_header *ph, int fd) * must read entire on-file attr struct to * sync up with layout. 
*/ - ret = read(fd, buf, sz); + ret = readn(fd, buf, sz); if (ret != (ssize_t)sz) goto error; @@ -1253,7 +1262,7 @@ read_event_desc(struct perf_header *ph, int fd) memcpy(&evsel->attr, buf, msz); - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) goto error; @@ -1274,7 +1283,7 @@ read_event_desc(struct perf_header *ph, int fd) evsel->id = id; for (j = 0 ; j < nr; j++) { - ret = read(fd, id, sizeof(*id)); + ret = readn(fd, id, sizeof(*id)); if (ret != (ssize_t)sizeof(*id)) goto error; if (ph->needs_swap) @@ -1506,14 +1515,14 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, while (offset < limit) { ssize_t len; - if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) + if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) return -1; if (header->needs_swap) perf_event_header__bswap(&old_bev.header); len = old_bev.header.size - sizeof(old_bev); - if (read(input, filename, len) != len) + if (readn(input, filename, len) != len) return -1; bev.header = old_bev.header; @@ -1548,14 +1557,14 @@ static int perf_header__read_build_ids(struct perf_header *header, while (offset < limit) { ssize_t len; - if (read(input, &bev, sizeof(bev)) != sizeof(bev)) + if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) goto out; if (header->needs_swap) perf_event_header__bswap(&bev.header); len = bev.header.size - sizeof(bev); - if (read(input, filename, len) != len) + if (readn(input, filename, len) != len) goto out; /* * The a1645ce1 changeset: @@ -1641,7 +1650,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, size_t ret; u32 nr; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1650,7 +1659,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, ph->env.nr_cpus_online = nr; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1684,7 +1693,7 @@ static int process_total_mem(struct perf_file_section *section __maybe_unused, uint64_t mem; size_t ret; - ret = read(fd, &mem, sizeof(mem)); + ret = readn(fd, &mem, sizeof(mem)); if (ret != sizeof(mem)) return -1; @@ -1756,7 +1765,7 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused, u32 nr, i; struct strbuf sb; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1792,7 +1801,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused char *str; struct strbuf sb; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1813,7 +1822,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused } ph->env.sibling_cores = strbuf_detach(&sb, NULL); - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1850,7 +1859,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse struct strbuf sb; /* nr nodes */ - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) goto error; @@ -1862,15 +1871,15 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse for (i = 0; i < nr; i++) { /* node number */ - ret = read(fd, &node, sizeof(node)); + ret = readn(fd, &node, sizeof(node)); if (ret != sizeof(node)) goto error; - ret = read(fd, &mem_total, sizeof(u64)); + ret = readn(fd, &mem_total, 
sizeof(u64)); if (ret != sizeof(u64)) goto error; - ret = read(fd, &mem_free, sizeof(u64)); + ret = readn(fd, &mem_free, sizeof(u64)); if (ret != sizeof(u64)) goto error; @@ -1909,7 +1918,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused u32 type; struct strbuf sb; - ret = read(fd, &pmu_num, sizeof(pmu_num)); + ret = readn(fd, &pmu_num, sizeof(pmu_num)); if (ret != sizeof(pmu_num)) return -1; @@ -1925,7 +1934,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused strbuf_init(&sb, 128); while (pmu_num) { - if (read(fd, &type, sizeof(type)) != sizeof(type)) + if (readn(fd, &type, sizeof(type)) != sizeof(type)) goto error; if (ph->needs_swap) type = bswap_32(type); @@ -2912,7 +2921,7 @@ int perf_event__process_tracing_data(union perf_event *event, session->repipe); padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; - if (read(session->fd, buf, padding) < 0) + if (readn(session->fd, buf, padding) < 0) die("reading input file"); if (session->repipe) { int retw = write(STDOUT_FILENO, buf, padding); -- cgit v1.2.3-59-g8ed1b From 7e383de42565ecb2cf641fff11946f9bc45e8235 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 18 Dec 2012 15:49:27 -0300 Subject: perf record: Don't pass host machine to guest synthesizer We were calling perf_session__process_machines(), that would first pass the struct machine associated with the host to the provided callback, perf_event__synthesize_guest_os() that would test if it was the host and if so wouldn't do anything. Ditch this contraption, just call directly machines__process with the list of guests. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-x65vsxgzg4dvo3zqohtrrb9o@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 11 ++++------- tools/perf/util/session.h | 9 --------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index de60dd4ee5d3..69e28950a328 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -328,10 +328,6 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data) { int err; struct perf_tool *tool = data; - - if (machine__is_host(machine)) - return; - /* *As for guest kernel when processing subcommand record&report, *we arrange module mmap prior to guest kernel mmap and trigger @@ -574,9 +570,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) "Symbol resolution may be skewed if relocation was used (e.g. 
kexec).\n" "Check /proc/modules permission or run as root.\n"); - if (perf_guest) - perf_session__process_machines(session, tool, - perf_event__synthesize_guest_os); + if (perf_guest) { + machines__process(&session->machines, + perf_event__synthesize_guest_os, tool); + } if (!opts->target.system_wide) err = perf_event__synthesize_thread_map(tool, evsel_list->threads, diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 8c2302504199..426ca0c3c5b6 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -101,15 +101,6 @@ struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t p return machines__findnew(&self->machines, pid); } -static inline -void perf_session__process_machines(struct perf_session *self, - struct perf_tool *tool, - machine__process_t process) -{ - process(&self->host_machine, tool); - return machines__process(&self->machines, process, tool); -} - struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); size_t perf_session__fprintf(struct perf_session *self, FILE *fp); -- cgit v1.2.3-59-g8ed1b From 52168eea32cc01377b31c1ca9a759eae06830ea0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 18 Dec 2012 16:02:17 -0300 Subject: perf hists: Rename hists__fprintf_nr_events to events_stats__fprintf As this function deals exclusively with hists->stats. Preparatory patch for removing the by now needless session->hists, that should be just session->stats. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-be0o8si9f1z40cwoa534f7me@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/stdio/hist.c | 6 +++--- tools/perf/util/hist.h | 2 +- tools/perf/util/session.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 0eae3b2c32f2..f9798298e3e0 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -459,7 +459,7 @@ out: return ret; } -size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) +size_t events_stats__fprintf(struct events_stats *stats, FILE *fp) { int i; size_t ret = 0; @@ -467,7 +467,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { const char *name; - if (hists->stats.nr_events[i] == 0) + if (stats->nr_events[i] == 0) continue; name = perf_event__name(i); @@ -475,7 +475,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) continue; ret += fprintf(fp, "%16s events: %10d\n", name, - hists->stats.nr_events[i]); + stats->nr_events[i]); } return ret; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index d3664abea6d6..cb6533b2977a 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -98,7 +98,7 @@ void hists__output_recalc_col_len(struct hists *hists, int max_rows); void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h); void hists__inc_nr_events(struct hists *self, u32 type); -size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); +size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); size_t hists__fprintf(struct hists *self, bool show_header, int max_rows, int max_cols, FILE *fp); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 76d6e257b8a4..6e8dd2abcf0b 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1353,11 +1353,11 @@ size_t 
perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) struct perf_evsel *pos; size_t ret = fprintf(fp, "Aggregated stats:\n"); - ret += hists__fprintf_nr_events(&session->hists, fp); + ret += events_stats__fprintf(&session->hists.stats, fp); list_for_each_entry(pos, &session->evlist->entries, node) { ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); - ret += hists__fprintf_nr_events(&pos->hists, fp); + ret += events_stats__fprintf(&pos->hists.stats, fp); } return ret; -- cgit v1.2.3-59-g8ed1b From 28a6b6aa54878a6a239e901698b3fc111bbcc54f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 18 Dec 2012 16:24:46 -0300 Subject: perf session: There is no need for a per session hists instance It was being used just for its stats member, so ditch session->hists and use just what is needed, session->stats. This completes the move support multiple events in the hists layer, the last user of session->hists was 'perf diff' but Jiri Olsa has fixed that some time ago. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-pimk92kek8kcp4dmb1jakoro@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 6 +++--- tools/perf/builtin-top.c | 4 ++-- tools/perf/util/hist.c | 9 +++++++-- tools/perf/util/hist.h | 1 + tools/perf/util/session.c | 41 ++++++++++++++++++++--------------------- tools/perf/util/session.h | 6 +----- 6 files changed, 34 insertions(+), 33 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index cc28b85dabd5..138229439a93 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1475,9 +1475,9 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy, goto out_delete; } - sched->nr_events = session->hists.stats.nr_events[0]; - sched->nr_lost_events = session->hists.stats.total_lost; - sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; + sched->nr_events = session->stats.nr_events[0]; + sched->nr_lost_events = session->stats.total_lost; + sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST]; } if (destroy) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index f5fd260f7b20..996b10c702ba 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -728,7 +728,7 @@ static void perf_event__process_sample(struct perf_tool *tool, if (!machine) { pr_err("%u unprocessable samples recorded.\n", - top->session->hists.stats.nr_unprocessable_samples++); + top->session->stats.nr_unprocessable_samples++); return; } @@ -878,7 +878,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) hists__inc_nr_events(&evsel->hists, event->header.type); machine__process_event(machine, event); } else - ++session->hists.stats.nr_unknown_events; + ++session->stats.nr_unknown_events; } } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 37179af74409..965ebf948f2f 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -717,10 +717,15 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize) return symbol__annotate(he->ms.sym, he->ms.map, privsize); } +void events_stats__inc(struct events_stats *stats, u32 type) +{ + ++stats->nr_events[0]; + ++stats->nr_events[type]; +} + void hists__inc_nr_events(struct hists *hists, u32 type) { - ++hists->stats.nr_events[0]; - ++hists->stats.nr_events[type]; + events_stats__inc(&hists->stats, type); } 
static struct hist_entry *hists__add_dummy_entry(struct hists *hists, diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index cb6533b2977a..38624686ee9a 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -98,6 +98,7 @@ void hists__output_recalc_col_len(struct hists *hists, int max_rows); void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h); void hists__inc_nr_events(struct hists *self, u32 type); +void events_stats__inc(struct events_stats *stats, u32 type); size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); size_t hists__fprintf(struct hists *self, bool show_header, int max_rows, diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 6e8dd2abcf0b..b0bcc328d1fb 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -133,7 +133,6 @@ struct perf_session *perf_session__new(const char *filename, int mode, INIT_LIST_HEAD(&self->ordered_samples.sample_cache); INIT_LIST_HEAD(&self->ordered_samples.to_free); machine__init(&self->host_machine, "", HOST_KERNEL_ID); - hists__init(&self->hists); if (mode == O_RDONLY) { if (perf_session__open(self, force) < 0) @@ -863,11 +862,11 @@ static int perf_session_deliver_event(struct perf_session *session, case PERF_RECORD_SAMPLE: dump_sample(evsel, event, sample); if (evsel == NULL) { - ++session->hists.stats.nr_unknown_id; + ++session->stats.nr_unknown_id; return 0; } if (machine == NULL) { - ++session->hists.stats.nr_unprocessable_samples; + ++session->stats.nr_unprocessable_samples; return 0; } return tool->sample(tool, event, sample, evsel, machine); @@ -881,7 +880,7 @@ static int perf_session_deliver_event(struct perf_session *session, return tool->exit(tool, event, sample, machine); case PERF_RECORD_LOST: if (tool->lost == perf_event__process_lost) - session->hists.stats.total_lost += event->lost.lost; + session->stats.total_lost += event->lost.lost; return tool->lost(tool, event, sample, machine); case PERF_RECORD_READ: return tool->read(tool, event, sample, evsel, machine); @@ -890,7 +889,7 @@ static int perf_session_deliver_event(struct perf_session *session, case PERF_RECORD_UNTHROTTLE: return tool->unthrottle(tool, event, sample, machine); default: - ++session->hists.stats.nr_unknown_events; + ++session->stats.nr_unknown_events; return -1; } } @@ -904,8 +903,8 @@ static int perf_session__preprocess_sample(struct perf_session *session, if (!ip_callchain__valid(sample->callchain, event)) { pr_debug("call-chain problem with event, skipping it.\n"); - ++session->hists.stats.nr_invalid_chains; - session->hists.stats.total_invalid_chains += sample->period; + ++session->stats.nr_invalid_chains; + session->stats.total_invalid_chains += sample->period; return -EINVAL; } return 0; @@ -963,7 +962,7 @@ static int perf_session__process_event(struct perf_session *session, if (event->header.type >= PERF_RECORD_HEADER_MAX) return -EINVAL; - hists__inc_nr_events(&session->hists, event->header.type); + events_stats__inc(&session->stats, event->header.type); if (event->header.type >= PERF_RECORD_USER_TYPE_START) return perf_session__process_user_event(session, event, tool, file_offset); @@ -1018,39 +1017,39 @@ static void perf_session__warn_about_errors(const struct perf_session *session, const struct perf_tool *tool) { if (tool->lost == perf_event__process_lost && - session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { + session->stats.nr_events[PERF_RECORD_LOST] != 0) { ui__warning("Processed %d events and lost %d chunks!\n\n" "Check IO/CPU overload!\n\n", - 
session->hists.stats.nr_events[0], - session->hists.stats.nr_events[PERF_RECORD_LOST]); + session->stats.nr_events[0], + session->stats.nr_events[PERF_RECORD_LOST]); } - if (session->hists.stats.nr_unknown_events != 0) { + if (session->stats.nr_unknown_events != 0) { ui__warning("Found %u unknown events!\n\n" "Is this an older tool processing a perf.data " "file generated by a more recent tool?\n\n" "If that is not the case, consider " "reporting to linux-kernel@vger.kernel.org.\n\n", - session->hists.stats.nr_unknown_events); + session->stats.nr_unknown_events); } - if (session->hists.stats.nr_unknown_id != 0) { + if (session->stats.nr_unknown_id != 0) { ui__warning("%u samples with id not present in the header\n", - session->hists.stats.nr_unknown_id); + session->stats.nr_unknown_id); } - if (session->hists.stats.nr_invalid_chains != 0) { + if (session->stats.nr_invalid_chains != 0) { ui__warning("Found invalid callchains!\n\n" "%u out of %u events were discarded for this reason.\n\n" "Consider reporting to linux-kernel@vger.kernel.org.\n\n", - session->hists.stats.nr_invalid_chains, - session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); + session->stats.nr_invalid_chains, + session->stats.nr_events[PERF_RECORD_SAMPLE]); } - if (session->hists.stats.nr_unprocessable_samples != 0) { + if (session->stats.nr_unprocessable_samples != 0) { ui__warning("%u unprocessable samples recorded.\n" "Do you have a KVM guest running and not using 'perf kvm'?\n", - session->hists.stats.nr_unprocessable_samples); + session->stats.nr_unprocessable_samples); } } @@ -1353,7 +1352,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) struct perf_evsel *pos; size_t ret = fprintf(fp, "Aggregated stats:\n"); - ret += events_stats__fprintf(&session->hists.stats, fp); + ret += events_stats__fprintf(&session->stats, fp); list_for_each_entry(pos, &session->evlist->entries, node) { ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 426ca0c3c5b6..57066cb867a6 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -34,11 +34,7 @@ struct perf_session { struct rb_root machines; struct perf_evlist *evlist; struct pevent *pevent; - /* - * FIXME: Need to split this up further, we need global - * stats + per event stats. - */ - struct hists hists; + struct events_stats stats; int fd; bool fd_pipe; bool repipe; -- cgit v1.2.3-59-g8ed1b From 876650e6c3209861a8949111140d805b3440951f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 18 Dec 2012 19:15:48 -0300 Subject: perf machine: Introduce struct machines That consolidates the grouping of host + guests, isolating a bit more of the functionality now centered on 'perf_session' so that it can be used independently in tools that don't need a 'perf_session' instance, but need to have all the thread/map/symbol machinery.
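For illustration only (not part of this patch; error handling is elided and the 1234 guest pid is made up), a tool that has no 'perf_session' can now use the new interface directly:

	struct machines machines;
	struct machine *guest;

	machines__init(&machines);	/* sets up .host and an empty .guests rbtree */

	guest = machines__findnew(&machines, 1234);	/* find or add a guest machine */
	if (guest != NULL) {
		/* use &machines.host or 'guest' as a plain struct machine */
	}

	machines__exit(&machines);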
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-c700rsiphpmzv8klogojpfut@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 4 +-- tools/perf/builtin-report.c | 2 +- tools/perf/builtin-top.c | 4 +-- tools/perf/tests/hists_link.c | 17 +++++------- tools/perf/util/header.c | 12 ++++---- tools/perf/util/machine.c | 64 ++++++++++++++++++++++++++++--------------- tools/perf/util/machine.h | 32 ++++++++++++++-------- tools/perf/util/session.c | 30 ++++++++------------ tools/perf/util/session.h | 11 ++------ 9 files changed, 95 insertions(+), 81 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 69e28950a328..a0b2427b509d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -571,8 +571,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) "Check /proc/modules permission or run as root.\n"); if (perf_guest) { - machines__process(&session->machines, - perf_event__synthesize_guest_os, tool); + machines__process_guests(&session->machines, + perf_event__synthesize_guest_os, tool); } if (!opts->target.system_wide) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 5134acf1c39a..13cdf61c4f82 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -372,7 +372,7 @@ static int __cmd_report(struct perf_report *rep) if (ret) goto out_delete; - kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION]; + kernel_map = session->machines.host.vmlinux_maps[MAP__FUNCTION]; kernel_kmap = map__kmap(kernel_map); if (kernel_map == NULL || (kernel_map->dso->hit && diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 996b10c702ba..e0ecebdde3fe 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -966,10 +966,10 @@ static int __cmd_top(struct perf_top *top) if (perf_target__has_task(&opts->target)) perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, perf_event__process, - &top->session->host_machine); + &top->session->machines.host); else perf_event__synthesize_threads(&top->tool, perf_event__process, - &top->session->host_machine); + &top->session->machines.host); perf_top__start_counters(top); top->session->evlist = top->evlist; perf_session__set_id_hdr_size(top->session); diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c index 0f1aae3b8a99..27860a082381 100644 --- a/tools/perf/tests/hists_link.c +++ b/tools/perf/tests/hists_link.c @@ -75,13 +75,11 @@ static struct { { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) }, }; -static struct machine *setup_fake_machine(void) +static struct machine *setup_fake_machine(struct machines *machines) { - struct rb_root machine_root = RB_ROOT; - struct machine *machine; + struct machine *machine = machines__find(machines, HOST_KERNEL_ID); size_t i; - machine = machines__findnew(&machine_root, HOST_KERNEL_ID); if (machine == NULL) { pr_debug("Not enough memory for machine setup\n"); return NULL; @@ -435,6 +433,7 @@ static void print_hists(struct hists *hists) int test__hists_link(void) { int err = -1; + struct machines machines; struct machine *machine = NULL; struct perf_evsel *evsel, *first; struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); @@ -452,8 +451,10 @@ int test__hists_link(void) /* default sort order (comm,dso,sym) will be used */ setup_sorting(NULL, NULL); + 
machines__init(&machines); + /* setup threads/dso/map/symbols also */ - machine = setup_fake_machine(); + machine = setup_fake_machine(&machines); if (!machine) goto out; @@ -492,11 +493,7 @@ int test__hists_link(void) out: /* tear down everything */ perf_evlist__delete(evlist); - - if (machine) { - machine__delete_threads(machine); - machine__delete(machine); - } + machines__exit(&machines); return err; } diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index bb578d2d10f9..fccd69dbbbb9 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -287,12 +287,12 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd) struct perf_session *session = container_of(header, struct perf_session, header); struct rb_node *nd; - int err = machine__write_buildid_table(&session->host_machine, fd); + int err = machine__write_buildid_table(&session->machines.host, fd); if (err) return err; - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); err = machine__write_buildid_table(pos, fd); if (err) @@ -448,9 +448,9 @@ static int perf_session__cache_build_ids(struct perf_session *session) if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) return -1; - ret = machine__cache_build_ids(&session->host_machine, debugdir); + ret = machine__cache_build_ids(&session->machines.host, debugdir); - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__cache_build_ids(pos, debugdir); } @@ -467,9 +467,9 @@ static bool machine__read_build_ids(struct machine *machine, bool with_hits) static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) { struct rb_node *nd; - bool ret = machine__read_build_ids(&session->host_machine, with_hits); + bool ret = machine__read_build_ids(&session->machines.host, with_hits); - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__read_build_ids(pos, with_hits); } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 71fa90391fe4..efdb38e65a92 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -91,10 +91,22 @@ void machine__delete(struct machine *machine) free(machine); } -struct machine *machines__add(struct rb_root *machines, pid_t pid, +void machines__init(struct machines *machines) +{ + machine__init(&machines->host, "", HOST_KERNEL_ID); + machines->guests = RB_ROOT; +} + +void machines__exit(struct machines *machines) +{ + machine__exit(&machines->host); + /* XXX exit guest */ +} + +struct machine *machines__add(struct machines *machines, pid_t pid, const char *root_dir) { - struct rb_node **p = &machines->rb_node; + struct rb_node **p = &machines->guests.rb_node; struct rb_node *parent = NULL; struct machine *pos, *machine = malloc(sizeof(*machine)); @@ -116,18 +128,21 @@ struct machine *machines__add(struct rb_root *machines, pid_t pid, } rb_link_node(&machine->rb_node, parent, p); - rb_insert_color(&machine->rb_node, machines); + rb_insert_color(&machine->rb_node, &machines->guests); return machine; } -struct machine *machines__find(struct rb_root *machines, pid_t pid) +struct machine *machines__find(struct 
machines *machines, pid_t pid) { - struct rb_node **p = &machines->rb_node; + struct rb_node **p = &machines->guests.rb_node; struct rb_node *parent = NULL; struct machine *machine; struct machine *default_machine = NULL; + if (pid == HOST_KERNEL_ID) + return &machines->host; + while (*p != NULL) { parent = *p; machine = rb_entry(parent, struct machine, rb_node); @@ -144,7 +159,7 @@ struct machine *machines__find(struct rb_root *machines, pid_t pid) return default_machine; } -struct machine *machines__findnew(struct rb_root *machines, pid_t pid) +struct machine *machines__findnew(struct machines *machines, pid_t pid) { char path[PATH_MAX]; const char *root_dir = ""; @@ -178,12 +193,12 @@ out: return machine; } -void machines__process(struct rb_root *machines, - machine__process_t process, void *data) +void machines__process_guests(struct machines *machines, + machine__process_t process, void *data) { struct rb_node *nd; - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); process(pos, data); } @@ -203,12 +218,14 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size) return bf; } -void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) +void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) { struct rb_node *node; struct machine *machine; - for (node = rb_first(machines); node; node = rb_next(node)) { + machines->host.id_hdr_size = id_hdr_size; + + for (node = rb_first(&machines->guests); node; node = rb_next(node)) { machine = rb_entry(node, struct machine, rb_node); machine->id_hdr_size = id_hdr_size; } @@ -313,12 +330,13 @@ struct map *machine__new_module(struct machine *machine, u64 start, return map; } -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) +size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) { struct rb_node *nd; - size_t ret = 0; + size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) + + __dsos__fprintf(&machines->host.user_dsos, fp); - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += __dsos__fprintf(&pos->kernel_dsos, fp); ret += __dsos__fprintf(&pos->user_dsos, fp); @@ -334,13 +352,13 @@ size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); } -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, +size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm) { struct rb_node *nd; - size_t ret = 0; + size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); } @@ -511,7 +529,7 @@ void machine__destroy_kernel_maps(struct machine *machine) } } -int machines__create_guest_kernel_maps(struct rb_root *machines) +int machines__create_guest_kernel_maps(struct machines *machines) { int ret = 0; struct dirent **namelist = NULL; @@ -560,20 +578,22 @@ failure: return ret; } -void machines__destroy_guest_kernel_maps(struct rb_root *machines) +void machines__destroy_kernel_maps(struct machines *machines) { - 
struct rb_node *next = rb_first(machines); + struct rb_node *next = rb_first(&machines->guests); + + machine__destroy_kernel_maps(&machines->host); while (next) { struct machine *pos = rb_entry(next, struct machine, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, machines); + rb_erase(&pos->rb_node, &machines->guests); machine__delete(pos); } } -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) +int machines__create_kernel_maps(struct machines *machines, pid_t pid) { struct machine *machine = machines__findnew(machines, pid); diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index e11236878ec1..5ac5892f2326 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -47,16 +47,24 @@ int machine__process_event(struct machine *machine, union perf_event *event); typedef void (*machine__process_t)(struct machine *machine, void *data); -void machines__process(struct rb_root *machines, - machine__process_t process, void *data); +struct machines { + struct machine host; + struct rb_root guests; +}; + +void machines__init(struct machines *machines); +void machines__exit(struct machines *machines); + +void machines__process_guests(struct machines *machines, + machine__process_t process, void *data); -struct machine *machines__add(struct rb_root *machines, pid_t pid, +struct machine *machines__add(struct machines *machines, pid_t pid, const char *root_dir); -struct machine *machines__find_host(struct rb_root *machines); -struct machine *machines__find(struct rb_root *machines, pid_t pid); -struct machine *machines__findnew(struct rb_root *machines, pid_t pid); +struct machine *machines__find_host(struct machines *machines); +struct machine *machines__find(struct machines *machines, pid_t pid); +struct machine *machines__findnew(struct machines *machines, pid_t pid); -void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size); +void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); char *machine__mmap_name(struct machine *machine, char *bf, size_t size); int machine__init(struct machine *machine, const char *root_dir, pid_t pid); @@ -132,17 +140,17 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type, size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm); -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, +size_t machines__fprintf_dsos(struct machines *machines, FILE *fp); +size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm); void machine__destroy_kernel_maps(struct machine *machine); int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); int machine__create_kernel_maps(struct machine *machine); -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); -int machines__create_guest_kernel_maps(struct rb_root *machines); -void machines__destroy_guest_kernel_maps(struct rb_root *machines); +int machines__create_kernel_maps(struct machines *machines, pid_t pid); +int machines__create_guest_kernel_maps(struct machines *machines); +void machines__destroy_kernel_maps(struct machines *machines); size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index b0bcc328d1fb..046b057c8f7f 100644 --- a/tools/perf/util/session.c +++ 
b/tools/perf/util/session.c @@ -86,13 +86,12 @@ void perf_session__set_id_hdr_size(struct perf_session *session) { u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); - session->host_machine.id_hdr_size = id_hdr_size; machines__set_id_hdr_size(&session->machines, id_hdr_size); } int perf_session__create_kernel_maps(struct perf_session *self) { - int ret = machine__create_kernel_maps(&self->host_machine); + int ret = machine__create_kernel_maps(&self->machines.host); if (ret >= 0) ret = machines__create_guest_kernel_maps(&self->machines); @@ -101,8 +100,7 @@ int perf_session__create_kernel_maps(struct perf_session *self) static void perf_session__destroy_kernel_maps(struct perf_session *self) { - machine__destroy_kernel_maps(&self->host_machine); - machines__destroy_guest_kernel_maps(&self->machines); + machines__destroy_kernel_maps(&self->machines); } struct perf_session *perf_session__new(const char *filename, int mode, @@ -127,12 +125,11 @@ struct perf_session *perf_session__new(const char *filename, int mode, goto out; memcpy(self->filename, filename, len); - self->machines = RB_ROOT; self->repipe = repipe; INIT_LIST_HEAD(&self->ordered_samples.samples); INIT_LIST_HEAD(&self->ordered_samples.sample_cache); INIT_LIST_HEAD(&self->ordered_samples.to_free); - machine__init(&self->host_machine, "", HOST_KERNEL_ID); + machines__init(&self->machines); if (mode == O_RDONLY) { if (perf_session__open(self, force) < 0) @@ -162,12 +159,12 @@ out_delete: static void perf_session__delete_dead_threads(struct perf_session *session) { - machine__delete_dead_threads(&session->host_machine); + machine__delete_dead_threads(&session->machines.host); } static void perf_session__delete_threads(struct perf_session *session) { - machine__delete_threads(&session->host_machine); + machine__delete_threads(&session->machines.host); } static void perf_session_env__delete(struct perf_session_env *env) @@ -192,7 +189,7 @@ void perf_session__delete(struct perf_session *self) perf_session__delete_dead_threads(self); perf_session__delete_threads(self); perf_session_env__delete(&self->header.env); - machine__exit(&self->host_machine); + machines__exit(&self->machines); close(self->fd); free(self); vdso__exit(); @@ -998,7 +995,7 @@ void perf_event_header__bswap(struct perf_event_header *self) struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) { - return machine__findnew_thread(&session->host_machine, pid); + return machine__findnew_thread(&session->machines.host, pid); } static struct thread *perf_session__register_idle_thread(struct perf_session *self) @@ -1335,16 +1332,13 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps, size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) { - return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + - __dsos__fprintf(&self->host_machine.user_dsos, fp) + - machines__fprintf_dsos(&self->machines, fp); + return machines__fprintf_dsos(&self->machines, fp); } size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm) { - size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm); - return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); + return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); } size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) @@ -1368,7 +1362,7 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp) * FIXME: Here we have to actually 
print all the machines in this * session, not just the host... */ - return machine__fprintf(&session->host_machine, fp); + return machine__fprintf(&session->machines.host, fp); } void perf_session__remove_thread(struct perf_session *session, @@ -1377,10 +1371,10 @@ void perf_session__remove_thread(struct perf_session *session, /* * FIXME: This one makes no sense, we need to remove the thread from * the machine it belongs to, perf_session can have many machines, so - * doing it always on ->host_machine is wrong. Fix when auditing all + * doing it always on ->machines.host is wrong. Fix when auditing all * the 'perf kvm' code. */ - machine__remove_thread(&session->host_machine, th); + machine__remove_thread(&session->machines.host, th); } struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 57066cb867a6..de4e68716626 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -30,8 +30,7 @@ struct ordered_samples { struct perf_session { struct perf_header header; unsigned long size; - struct machine host_machine; - struct rb_root machines; + struct machines machines; struct perf_evlist *evlist; struct pevent *pevent; struct events_stats stats; @@ -49,7 +48,7 @@ struct perf_tool; struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe, struct perf_tool *tool); -void perf_session__delete(struct perf_session *self); +void perf_session__delete(struct perf_session *session); void perf_event_header__bswap(struct perf_event_header *self); @@ -78,22 +77,18 @@ void perf_session__remove_thread(struct perf_session *self, struct thread *th); static inline struct machine *perf_session__find_host_machine(struct perf_session *self) { - return &self->host_machine; + return &self->machines.host; } static inline struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid) { - if (pid == HOST_KERNEL_ID) - return &self->host_machine; return machines__find(&self->machines, pid); } static inline struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid) { - if (pid == HOST_KERNEL_ID) - return &self->host_machine; return machines__findnew(&self->machines, pid); } -- cgit v1.2.3-59-g8ed1b From 09ecbb07a5c9fa31dc72ee8fb1c3fbd1145448dd Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 19 Dec 2012 09:52:21 +0100 Subject: perf tests: Fix PYTHONPATH for python-use test If there's no OUTPUT variable defined, the PYTHONPATH ends up with /python. We need to remove the extra '/'.
Signed-off-by: Jiri Olsa Link: http://lkml.kernel.org/n/tip-h1hzfyfcdxjnuq9fin2cjwlr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3158f457b6ae..1539eb4d97a0 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -958,7 +958,7 @@ $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ - -DPYTHONPATH='"$(OUTPUT)/python"' \ + -DPYTHONPATH='"$(OUTPUT)python"' \ -DPYTHON='"$(PYTHON_WORD)"' \ $< -- cgit v1.2.3-59-g8ed1b From 34ba5122bf198c9cdfcbecc9b23eaa414244a3f6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 19 Dec 2012 09:04:24 -0300 Subject: perf machine: Simplify accessing the host machine It is always there, no sense in calling a function named "perf_session__find_host_machine". Also no sense in checking whether that function's return value is NULL, so ditch the needless error handling. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-a6a3zx3afbrxo8p2zqm5mxo8@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-kmem.c | 7 +------ tools/perf/builtin-record.c | 7 +------ tools/perf/builtin-top.c | 4 ++-- tools/perf/util/session.c | 2 +- tools/perf/util/session.h | 6 ------ 5 files changed, 5 insertions(+), 21 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 0b4b796167be..9fddca749ca2 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -340,7 +340,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session, int n_lines, int is_caller) { struct rb_node *next; - struct machine *machine; + struct machine *machine = &session->machines.host; printf("%.102s\n", graph_dotted_line); printf(" %-34s |", is_caller ?
"Callsite": "Alloc Ptr"); @@ -349,11 +349,6 @@ static void __print_result(struct rb_root *root, struct perf_session *session, next = rb_first(root); - machine = perf_session__find_host_machine(session); - if (!machine) { - pr_err("__print_result: couldn't find kernel information\n"); - return; - } while (next && n_lines--) { struct alloc_stat *data = rb_entry(next, struct alloc_stat, node); diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a0b2427b509d..2ac690cad411 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -512,12 +512,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) rec->post_processing_offset = lseek(output, 0, SEEK_CUR); - machine = perf_session__find_host_machine(session); - if (!machine) { - pr_err("Couldn't find native kernel information.\n"); - err = -1; - goto out_delete_session; - } + machine = &session->machines.host; if (opts->pipe_output) { err = perf_event__synthesize_attrs(tool, session, diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index e0ecebdde3fe..bc788126397b 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -847,13 +847,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) ++top->us_samples; if (top->hide_user_symbols) continue; - machine = perf_session__find_host_machine(session); + machine = &session->machines.host; break; case PERF_RECORD_MISC_KERNEL: ++top->kernel_samples; if (top->hide_kernel_symbols) continue; - machine = perf_session__find_host_machine(session); + machine = &session->machines.host; break; case PERF_RECORD_MISC_GUEST_KERNEL: ++top->guest_kernel_samples; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 046b057c8f7f..bd85280bb6e8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -821,7 +821,7 @@ static struct machine * return perf_session__findnew_machine(session, pid); } - return perf_session__find_host_machine(session); + return &session->machines.host; } static int perf_session_deliver_event(struct perf_session *session, diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index de4e68716626..b5c0847edfa9 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -74,12 +74,6 @@ int perf_session__create_kernel_maps(struct perf_session *self); void perf_session__set_id_hdr_size(struct perf_session *session); void perf_session__remove_thread(struct perf_session *self, struct thread *th); -static inline -struct machine *perf_session__find_host_machine(struct perf_session *self) -{ - return &self->machines.host; -} - static inline struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid) { -- cgit v1.2.3-59-g8ed1b From 20914ce5b9e1ef4a35f1f09a2c9c8fb8eb1c4d86 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 19 Dec 2012 10:59:29 -0300 Subject: perf kvm: Initialize file_name var to fix segfault The 3786063 commit: perf kvm: Rename perf_kvm to perf_kvm_stat Moved the file_name from inside a local struct var that initialized some of its members, thus zero initializing the not explicitely initialized variables, one of which was 'file_name', to a standalone local variable, but forgot to initialize it explicitely to NULL, so it then got some undefined value, causing a segfault in strdup when it wasn't, by luck, zero. Fix it by explicitely initializing it to NULL. 
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Xiao Guangrong Link: http://lkml.kernel.org/n/tip-qo2jevp1bdcnh8khzdazs17s@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-kvm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index ca3f80ebc100..37a769d7f9fe 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -973,8 +973,7 @@ __cmd_buildid_list(const char *file_name, int argc, const char **argv) int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) { - const char *file_name; - + const char *file_name = NULL; const struct option kvm_options[] = { OPT_STRING('i', "input", &file_name, "file", "Input file name"), -- cgit v1.2.3-59-g8ed1b From f4c1ea5f2a6b9e1c0aaa874ffb25fe4a4f9f1a3f Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 19 Dec 2012 11:33:39 -0300 Subject: perf tests: Add return states enum for tests A test can currently return one of 3 states: ok, fail, skip. The ok and fail states are self-explanatory. The skip state means that some of the conditions for running the test were not met, making it impossible to even run the test. For instance, if the hardware doesn't support the 'precise' level required by a test, it will be skipped. Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/n/tip-04vnsdndarctfb1eii5c9hcy@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 15 ++++++++++++--- tools/perf/tests/tests.h | 6 ++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index a164e4cd5f42..6a5dee2377b0 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -129,10 +129,19 @@ static int __cmd_test(int argc, const char *argv[]) pr_debug("\n--- start ---\n"); err = tests[curr].func(); pr_debug("---- end ----\n%s:", tests[curr].desc); - if (err) - color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); - else + + switch (err) { + case TEST_OK: pr_info(" Ok\n"); + break; + case TEST_SKIP: + color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n"); + break; + case TEST_FAIL: + default: + color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); + break; + } } return 0; diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 0ded425b17d6..5de0be1ff4b6 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -1,6 +1,12 @@ #ifndef TESTS_H #define TESTS_H +enum { + TEST_OK = 0, + TEST_FAIL = -1, + TEST_SKIP = -2, +}; + /* Tests */ int test__vmlinux_matches_kallsyms(void); int test__open_syscall_event(void); -- cgit v1.2.3-59-g8ed1b From 531f67bb9ff4069b629762dd5a560e23fc4c5478 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 19 Dec 2012 11:11:59 -0300 Subject: perf tests: Don't fail if a matching vmlinux isn't found, skip that test Now that Jiri introduced TEST_SKIP, don't fail on the first test, just skip it.
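For illustration only, the pattern a test now follows (hypothetical test and helper names; the TEST_* values come from tests.h above):

	int test__example(void)
	{
		if (!example_precondition_met())	/* made-up probe */
			return TEST_SKIP;		/* can't run here: skip, don't fail */

		if (example_run_checks() != 0)		/* made-up check */
			return TEST_FAIL;

		return TEST_OK;
	}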
Requested-by: Ingo Molnar Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-wtgs8rd7fjwfhx72pv4la127@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/vmlinux-kallsyms.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index 0d1cdbee2f59..a1a8442829b4 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -101,7 +101,8 @@ int test__vmlinux_matches_kallsyms(void) */ if (machine__load_vmlinux_path(&vmlinux, type, vmlinux_matches_kallsyms_filter) <= 0) { - pr_debug("machine__load_vmlinux_path "); + pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n"); + err = TEST_SKIP; goto out; } -- cgit v1.2.3-59-g8ed1b From 53985a7bfa7b947c91f35c6adcdc9cacc6fef07f Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 20 Dec 2012 14:11:20 -0500 Subject: perf tools: remove redundant checks from _sort__sym_cmp We already check that sym_l and sym_r are non-NULL, no need to do it twice. Signed-off-by: Sasha Levin Acked-by: Namhyung Kim Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356030701-16284-12-git-send-email-sasha.levin@oracle.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index cfd1c0feb32d..dbaded90312c 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -143,10 +143,8 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, if (sym_l == sym_r) return 0; - if (sym_l) - ip_l = sym_l->start; - if (sym_r) - ip_r = sym_r->start; + ip_l = sym_l->start; + ip_r = sym_r->start; return (int64_t)(ip_r - ip_l); } -- cgit v1.2.3-59-g8ed1b From 49e4ba54592435cbd150aea8f5ae6b776fc687f9 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 20 Dec 2012 14:11:16 -0500 Subject: perf kmem: use ARRAY_SIZE instead of reinventing it Signed-off-by: Sasha Levin Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1356030701-16284-8-git-send-email-sasha.levin@oracle.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-kmem.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 9fddca749ca2..c746108c5d48 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -609,8 +609,7 @@ static struct sort_dimension *avail_sorts[] = { &pingpong_sort_dimension, }; -#define NUM_AVAIL_SORTS \ - (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *)) +#define NUM_AVAIL_SORTS ((int)ARRAY_SIZE(avail_sorts)) static int sort_dimension__add(const char *tok, struct list_head *list) { -- cgit v1.2.3-59-g8ed1b From 50ca19aed8ad2e264ef6bce6d6d5cb7f8bfae8c9 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 20 Dec 2012 14:11:19 -0500 Subject: perf script: use ARRAY_SIZE instead of reinventing it Signed-off-by: Sasha Levin Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1356030701-16284-11-git-send-email-sasha.levin@oracle.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git
a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index b363e7b292b2..3314ef2ec83e 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -692,7 +692,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused, const char *arg, int unset __maybe_unused) { char *tok; - int i, imax = sizeof(all_output_options) / sizeof(struct output_option); + int i, imax = ARRAY_SIZE(all_output_options); int j; int rc = 0; char *str = strdup(arg); -- cgit v1.2.3-59-g8ed1b From c91368c4889a0ee5dd06552adbb50ae54f5096fd Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 20 Dec 2012 14:11:30 -0500 Subject: uprobes: remove redundant check We checked for uprobe==NULL earlier, no need to redo that. Signed-off-by: Sasha Levin Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1356030701-16284-22-git-send-email-sasha.levin@oracle.com Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/uprobes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index dea7acfbb071..30ea9a4f4ab4 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -901,8 +901,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume } mutex_unlock(uprobes_hash(inode)); - if (uprobe) - put_uprobe(uprobe); + put_uprobe(uprobe); } static struct rb_node * -- cgit v1.2.3-59-g8ed1b From 0da41ce954840a74e7a0de9c8268bf855147e902 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 21 Dec 2012 17:20:13 +0900 Subject: perf ui/gtk: Factor out common browser routines Separate out common codes for setting up a browser, and move report/hist browser codes into hists.c. The common codes can be used for annotation browser. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1356078018-31905-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 1 + tools/perf/ui/gtk/browser.c | 227 +------------------------------------------- tools/perf/ui/gtk/gtk.h | 9 +- tools/perf/ui/gtk/hists.c | 226 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 240 insertions(+), 223 deletions(-) create mode 100644 tools/perf/ui/gtk/hists.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 1539eb4d97a0..e18163bb32d7 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -688,6 +688,7 @@ ifndef NO_GTK2 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null) EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null) LIB_OBJS += $(OUTPUT)ui/gtk/browser.o + LIB_OBJS += $(OUTPUT)ui/gtk/hists.o LIB_OBJS += $(OUTPUT)ui/gtk/setup.o LIB_OBJS += $(OUTPUT)ui/gtk/util.o LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c index e59ba337f494..c95012cdb438 100644 --- a/tools/perf/ui/gtk/browser.c +++ b/tools/perf/ui/gtk/browser.c @@ -8,15 +8,13 @@ #include -#define MAX_COLUMNS 32 - -static void perf_gtk__signal(int sig) +void perf_gtk__signal(int sig) { perf_gtk__exit(false); psignal(sig, "perf"); } -static void perf_gtk__resize_window(GtkWidget *window) +void perf_gtk__resize_window(GtkWidget *window) { GdkRectangle rect; GdkScreen *screen; @@ -36,7 +34,7 @@ static void perf_gtk__resize_window(GtkWidget *window) gtk_window_resize(GTK_WINDOW(window), width, height); } -static const char *perf_gtk__get_percent_color(double percent) +const char *perf_gtk__get_percent_color(double percent) 
{ if (percent >= MIN_RED) return ""; @@ -45,147 +43,8 @@ static const char *perf_gtk__get_percent_color(double percent) return NULL; } -#define HPP__COLOR_FN(_name, _field) \ -static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ - struct hist_entry *he) \ -{ \ - struct hists *hists = he->hists; \ - double percent = 100.0 * he->stat._field / hists->stats.total_period; \ - const char *markup; \ - int ret = 0; \ - \ - markup = perf_gtk__get_percent_color(percent); \ - if (markup) \ - ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ - ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ - if (markup) \ - ret += scnprintf(hpp->buf + ret, hpp->size - ret, ""); \ - \ - return ret; \ -} - -HPP__COLOR_FN(overhead, period) -HPP__COLOR_FN(overhead_sys, period_sys) -HPP__COLOR_FN(overhead_us, period_us) -HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) -HPP__COLOR_FN(overhead_guest_us, period_guest_us) - -#undef HPP__COLOR_FN - -void perf_gtk__init_hpp(void) -{ - perf_hpp__column_enable(PERF_HPP__OVERHEAD); - - perf_hpp__init(); - - perf_hpp__format[PERF_HPP__OVERHEAD].color = - perf_gtk__hpp_color_overhead; - perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = - perf_gtk__hpp_color_overhead_sys; - perf_hpp__format[PERF_HPP__OVERHEAD_US].color = - perf_gtk__hpp_color_overhead_us; - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = - perf_gtk__hpp_color_overhead_guest_sys; - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = - perf_gtk__hpp_color_overhead_guest_us; -} - -static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) -{ - struct perf_hpp_fmt *fmt; - GType col_types[MAX_COLUMNS]; - GtkCellRenderer *renderer; - struct sort_entry *se; - GtkListStore *store; - struct rb_node *nd; - GtkWidget *view; - int col_idx; - int nr_cols; - char s[512]; - - struct perf_hpp hpp = { - .buf = s, - .size = sizeof(s), - }; - - nr_cols = 0; - - perf_hpp__for_each_format(fmt) - col_types[nr_cols++] = G_TYPE_STRING; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - col_types[nr_cols++] = G_TYPE_STRING; - } - - store = gtk_list_store_newv(nr_cols, col_types); - - view = gtk_tree_view_new(); - - renderer = gtk_cell_renderer_text_new(); - - col_idx = 0; - - perf_hpp__for_each_format(fmt) { - fmt->header(&hpp); - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, s, - renderer, "markup", - col_idx++, NULL); - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, se->se_header, - renderer, "text", - col_idx++, NULL); - } - - gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); - - g_object_unref(GTK_TREE_MODEL(store)); - - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { - struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - GtkTreeIter iter; - - if (h->filtered) - continue; - - gtk_list_store_append(store, &iter); - - col_idx = 0; - - perf_hpp__for_each_format(fmt) { - if (fmt->color) - fmt->color(&hpp, h); - else - fmt->entry(&hpp, h); - - gtk_list_store_set(store, &iter, col_idx++, s, -1); - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - se->se_snprintf(h, s, ARRAY_SIZE(s), - hists__col_len(hists, se->se_width_idx)); - - gtk_list_store_set(store, &iter, col_idx++, s, -1); - } - } - - gtk_container_add(GTK_CONTAINER(window), view); -} - #ifdef HAVE_GTK_INFO_BAR -static GtkWidget 
*perf_gtk__setup_info_bar(void) +GtkWidget *perf_gtk__setup_info_bar(void) { GtkWidget *info_bar; GtkWidget *label; @@ -212,7 +71,7 @@ static GtkWidget *perf_gtk__setup_info_bar(void) } #endif -static GtkWidget *perf_gtk__setup_statusbar(void) +GtkWidget *perf_gtk__setup_statusbar(void) { GtkWidget *stbar; unsigned ctxid; @@ -226,79 +85,3 @@ static GtkWidget *perf_gtk__setup_statusbar(void) return stbar; } - -int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, - const char *help, - struct hist_browser_timer *hbt __maybe_unused) -{ - struct perf_evsel *pos; - GtkWidget *vbox; - GtkWidget *notebook; - GtkWidget *info_bar; - GtkWidget *statbar; - GtkWidget *window; - - signal(SIGSEGV, perf_gtk__signal); - signal(SIGFPE, perf_gtk__signal); - signal(SIGINT, perf_gtk__signal); - signal(SIGQUIT, perf_gtk__signal); - signal(SIGTERM, perf_gtk__signal); - - window = gtk_window_new(GTK_WINDOW_TOPLEVEL); - - gtk_window_set_title(GTK_WINDOW(window), "perf report"); - - g_signal_connect(window, "delete_event", gtk_main_quit, NULL); - - pgctx = perf_gtk__activate_context(window); - if (!pgctx) - return -1; - - vbox = gtk_vbox_new(FALSE, 0); - - notebook = gtk_notebook_new(); - - list_for_each_entry(pos, &evlist->entries, node) { - struct hists *hists = &pos->hists; - const char *evname = perf_evsel__name(pos); - GtkWidget *scrolled_window; - GtkWidget *tab_label; - - scrolled_window = gtk_scrolled_window_new(NULL, NULL); - - gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), - GTK_POLICY_AUTOMATIC, - GTK_POLICY_AUTOMATIC); - - perf_gtk__show_hists(scrolled_window, hists); - - tab_label = gtk_label_new(evname); - - gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); - } - - gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); - - info_bar = perf_gtk__setup_info_bar(); - if (info_bar) - gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); - - statbar = perf_gtk__setup_statusbar(); - gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); - - gtk_container_add(GTK_CONTAINER(window), vbox); - - gtk_widget_show_all(window); - - perf_gtk__resize_window(window); - - gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); - - ui_helpline__push(help); - - gtk_main(); - - perf_gtk__deactivate_context(&pgctx); - - return 0; -} diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h index 856320e2cc05..5d3693754828 100644 --- a/tools/perf/ui/gtk/gtk.h +++ b/tools/perf/ui/gtk/gtk.h @@ -33,7 +33,14 @@ void perf_gtk__init_helpline(void); void perf_gtk__init_progress(void); void perf_gtk__init_hpp(void); -#ifndef HAVE_GTK_INFO_BAR +void perf_gtk__signal(int sig); +void perf_gtk__resize_window(GtkWidget *window); +const char *perf_gtk__get_percent_color(double percent); +GtkWidget *perf_gtk__setup_statusbar(void); + +#ifdef HAVE_GTK_INFO_BAR +GtkWidget *perf_gtk__setup_info_bar(void); +#else static inline GtkWidget *perf_gtk__setup_info_bar(void) { return NULL; diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c new file mode 100644 index 000000000000..26912f86e032 --- /dev/null +++ b/tools/perf/ui/gtk/hists.c @@ -0,0 +1,226 @@ +#include "../evlist.h" +#include "../cache.h" +#include "../evsel.h" +#include "../sort.h" +#include "../hist.h" +#include "../helpline.h" +#include "gtk.h" + +#define MAX_COLUMNS 32 + +#define HPP__COLOR_FN(_name, _field) \ +static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + struct hists *hists = he->hists; \ + double percent = 100.0 * 
he->stat._field / hists->stats.total_period; \ + const char *markup; \ + int ret = 0; \ + \ + markup = perf_gtk__get_percent_color(percent); \ + if (markup) \ + ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ + ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ + if (markup) \ + ret += scnprintf(hpp->buf + ret, hpp->size - ret, ""); \ + \ + return ret; \ +} + +HPP__COLOR_FN(overhead, period) +HPP__COLOR_FN(overhead_sys, period_sys) +HPP__COLOR_FN(overhead_us, period_us) +HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) +HPP__COLOR_FN(overhead_guest_us, period_guest_us) + +#undef HPP__COLOR_FN + + +void perf_gtk__init_hpp(void) +{ + perf_hpp__column_enable(PERF_HPP__OVERHEAD); + + perf_hpp__init(); + + perf_hpp__format[PERF_HPP__OVERHEAD].color = + perf_gtk__hpp_color_overhead; + perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = + perf_gtk__hpp_color_overhead_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_US].color = + perf_gtk__hpp_color_overhead_us; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = + perf_gtk__hpp_color_overhead_guest_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = + perf_gtk__hpp_color_overhead_guest_us; +} + +static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) +{ + struct perf_hpp_fmt *fmt; + GType col_types[MAX_COLUMNS]; + GtkCellRenderer *renderer; + struct sort_entry *se; + GtkListStore *store; + struct rb_node *nd; + GtkWidget *view; + int col_idx; + int nr_cols; + char s[512]; + + struct perf_hpp hpp = { + .buf = s, + .size = sizeof(s), + }; + + nr_cols = 0; + + perf_hpp__for_each_format(fmt) + col_types[nr_cols++] = G_TYPE_STRING; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + if (se->elide) + continue; + + col_types[nr_cols++] = G_TYPE_STRING; + } + + store = gtk_list_store_newv(nr_cols, col_types); + + view = gtk_tree_view_new(); + + renderer = gtk_cell_renderer_text_new(); + + col_idx = 0; + + perf_hpp__for_each_format(fmt) { + fmt->header(&hpp); + + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), + -1, s, + renderer, "markup", + col_idx++, NULL); + } + + list_for_each_entry(se, &hist_entry__sort_list, list) { + if (se->elide) + continue; + + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), + -1, se->se_header, + renderer, "text", + col_idx++, NULL); + } + + gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); + + g_object_unref(GTK_TREE_MODEL(store)); + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + GtkTreeIter iter; + + if (h->filtered) + continue; + + gtk_list_store_append(store, &iter); + + col_idx = 0; + + perf_hpp__for_each_format(fmt) { + if (fmt->color) + fmt->color(&hpp, h); + else + fmt->entry(&hpp, h); + + gtk_list_store_set(store, &iter, col_idx++, s, -1); + } + + list_for_each_entry(se, &hist_entry__sort_list, list) { + if (se->elide) + continue; + + se->se_snprintf(h, s, ARRAY_SIZE(s), + hists__col_len(hists, se->se_width_idx)); + + gtk_list_store_set(store, &iter, col_idx++, s, -1); + } + } + + gtk_container_add(GTK_CONTAINER(window), view); +} + +int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, + const char *help, + struct hist_browser_timer *hbt __maybe_unused) +{ + struct perf_evsel *pos; + GtkWidget *vbox; + GtkWidget *notebook; + GtkWidget *info_bar; + GtkWidget *statbar; + GtkWidget *window; + + signal(SIGSEGV, perf_gtk__signal); + signal(SIGFPE, perf_gtk__signal); + signal(SIGINT, perf_gtk__signal); + 
signal(SIGQUIT, perf_gtk__signal); + signal(SIGTERM, perf_gtk__signal); + + window = gtk_window_new(GTK_WINDOW_TOPLEVEL); + + gtk_window_set_title(GTK_WINDOW(window), "perf report"); + + g_signal_connect(window, "delete_event", gtk_main_quit, NULL); + + pgctx = perf_gtk__activate_context(window); + if (!pgctx) + return -1; + + vbox = gtk_vbox_new(FALSE, 0); + + notebook = gtk_notebook_new(); + + list_for_each_entry(pos, &evlist->entries, node) { + struct hists *hists = &pos->hists; + const char *evname = perf_evsel__name(pos); + GtkWidget *scrolled_window; + GtkWidget *tab_label; + + scrolled_window = gtk_scrolled_window_new(NULL, NULL); + + gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), + GTK_POLICY_AUTOMATIC, + GTK_POLICY_AUTOMATIC); + + perf_gtk__show_hists(scrolled_window, hists); + + tab_label = gtk_label_new(evname); + + gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); + } + + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); + + info_bar = perf_gtk__setup_info_bar(); + if (info_bar) + gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); + + statbar = perf_gtk__setup_statusbar(); + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + + gtk_container_add(GTK_CONTAINER(window), vbox); + + gtk_widget_show_all(window); + + perf_gtk__resize_window(window); + + gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); + + ui_helpline__push(help); + + gtk_main(); + + perf_gtk__deactivate_context(&pgctx); + + return 0; +} -- cgit v1.2.3-59-g8ed1b From 6bf1a295a896c8dbe8cad663c6344e8c877a0570 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 21 Dec 2012 17:20:14 +0900 Subject: perf ui/gtk: Set up browser window early The ui__error/warning functions use the gtk infobar or statusbar, and the pr_* functions use the statusbar too. But if one of those functions is called after the perf gtk context has been created but before the infobar and/or statusbar have been set up, it causes a segmentation fault. Although the current code has no such problem, move this setup as early as possible to prevent the segfault from creeping in with future changes.
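To make the failure mode concrete, here is a minimal standalone C sketch; the context struct and the error helper are hypothetical simplifications, not the actual perf code:

#include <stdio.h>

struct gtk_ctx {
	void *info_bar;		/* packed into the window by the setup helpers */
	void *statusbar;
};

static struct gtk_ctx *pgctx;	/* set once the context is activated */

/* An error reporter that trusts pgctx would crash in the window between
 * context activation and widget setup, when info_bar is still NULL.
 * Packing the bars right after creating the context closes that gap. */
static int show_error(const char *msg)
{
	if (!pgctx || !pgctx->info_bar)
		return fprintf(stderr, "%s", msg);	/* safe fallback */
	/* ... otherwise push msg into the info bar ... */
	return 0;
}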
Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1356078018-31905-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/hists.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 26912f86e032..c03da79d524f 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -180,6 +180,17 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, notebook = gtk_notebook_new(); + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); + + info_bar = perf_gtk__setup_info_bar(); + if (info_bar) + gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); + + statbar = perf_gtk__setup_statusbar(); + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + + gtk_container_add(GTK_CONTAINER(window), vbox); + list_for_each_entry(pos, &evlist->entries, node) { struct hists *hists = &pos->hists; const char *evname = perf_evsel__name(pos); @@ -199,17 +210,6 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); } - gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); - - info_bar = perf_gtk__setup_info_bar(); - if (info_bar) - gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); - - statbar = perf_gtk__setup_statusbar(); - gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); - - gtk_container_add(GTK_CONTAINER(window), vbox); - gtk_widget_show_all(window); perf_gtk__resize_window(window); -- cgit v1.2.3-59-g8ed1b From fba7a78220fd969beeaa9abbb97c5f7027a7b4fd Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Fri, 21 Dec 2012 15:00:58 -0500 Subject: tools lib traceevent: Test correct variable after allocation We tested the wrong variable for allocation failure; fix it to test the right one. Cc: Frederic Weisbecker Cc: Namhyung Kim Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1356120062-2648-1-git-send-email-sasha.levin@oracle.com Signed-off-by: Sasha Levin Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/event-parse.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 5a824e355d04..7538a1f9c557 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -2481,7 +2481,7 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char ** free_token(token); arg = alloc_arg(); - if (!field) { + if (!arg) { do_warning("%s: not enough memory!", __func__); *tok = NULL; return EVENT_ERROR; -- cgit v1.2.3-59-g8ed1b From 7b9f6b402c756975ac4e23a5f1bfa261c7c90017 Mon Sep 17 00:00:00 2001 From: Jon Stanley Date: Fri, 7 Sep 2012 16:32:46 -0400 Subject: tools lib traceevent: Update FSF postal address to URLs The FSF now prefers to use URLs in copyright headers rather than its postal address.
This change updates the address to be per http://www.gnu.org/licenses/gpl-howto.html Signed-off-by: Jon Stanley Acked-by: Steven Rostedt Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1347049967-3143-1-git-send-email-jonstanley@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/event-parse.c | 3 +-- tools/lib/traceevent/event-parse.h | 3 +-- tools/lib/traceevent/event-utils.h | 3 +-- tools/lib/traceevent/parse-filter.c | 3 +-- tools/lib/traceevent/trace-seq.c | 3 +-- 5 files changed, 5 insertions(+), 10 deletions(-) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 7538a1f9c557..dc42e55469d1 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 24a4bbabc5d5..7be7e89533e4 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h index bc075006966e..e76c9acb92cd 100644 --- a/tools/lib/traceevent/event-utils.h +++ b/tools/lib/traceevent/event-utils.h @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c index 5ea4326ad11f..2500e75583fc 100644 --- a/tools/lib/traceevent/parse-filter.c +++ b/tools/lib/traceevent/parse-filter.c @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c index b1ccc923e8a5..a57db805136a 100644 --- a/tools/lib/traceevent/trace-seq.c +++ b/tools/lib/traceevent/trace-seq.c @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -- cgit v1.2.3-59-g8ed1b From 2d2481ddf634695aa17ad91bd765572329941674 Mon Sep 17 00:00:00 2001 From: Jon Stanley Date: Sat, 8 Sep 2012 14:00:51 -0400 Subject: tools lib traceevent: Add copyright header Adding a missing copyright header to parse-utils.c. Assuming that the license is LGPL like the rest of the trace-cmd library code. Signed-off-by: Jon Stanley Acked-by: Steven Rostedt Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1347127251-4695-1-git-send-email-jonstanley@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/parse-utils.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c index f023a133abb6..bba701cf10e6 100644 --- a/tools/lib/traceevent/parse-utils.c +++ b/tools/lib/traceevent/parse-utils.c @@ -1,3 +1,22 @@ +/* + * Copyright (C) 2010 Red Hat Inc, Steven Rostedt + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ #include #include #include -- cgit v1.2.3-59-g8ed1b From 31160d7feab786c991780d7f0ce2755a469e0e5e Mon Sep 17 00:00:00 2001 From: Al Cooper Date: Tue, 8 Jan 2013 16:22:36 -0500 Subject: perf tools: Fix GNU make v3.80 compatibility issue According to Documentation/Changes, the kernel should be buildable with GNU make 3.80+. tools/perf/config/utilities.mak contains the "$(or" construct, which requires make 3.81. This causes "make" to fail on systems with GNU make 3.80. Replace "$(or" with an equivalent "$(if" expression, to restore backward compatibility. Also fix an issue where _ge_attempt was called with only one argument. This prevented the error message from printing the name of the variable that can be used to fix the problem. Signed-off-by: Al Cooper Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1357680156-15520-1-git-send-email-alcooperx@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/config/utilities.mak | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak index e5413125e6bb..8ef3bd30a549 100644 --- a/tools/perf/config/utilities.mak +++ b/tools/perf/config/utilities.mak @@ -13,7 +13,7 @@ newline := $(newline) # what should replace a newline when escaping # newlines; the default is a bizarre string.
# -nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) +nl-escape = $(if $(1),$(1),m822df3020w6a44id34bt574ctac44eb9f4n) # escape-nl # @@ -173,9 +173,9 @@ _ge-abspath = $(if $(is-executable),$(1)) # Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) # define get-executable-or-default -$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) +$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1))) endef -_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) +_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2))) _gea_warn = $(warning The path '$(1)' is not executable.) _gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) -- cgit v1.2.3-59-g8ed1b From a65c23420668f7112395abf9c6f22c6450567bf0 Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Tue, 25 Dec 2012 17:23:39 +0100 Subject: perf tools: Fix possible (unlikely) buffer overflow cppcheck reported: [tools/perf/util/sysfs.c:50]: (error) Width 4096 given in format string (no. 1) is larger than destination buffer 'sysfs_mountpoint[4096]', use %4095s to prevent overflowing it. All other places in the kernel that use STR(PATH_MAX) have a buffer size of PATH_MAX + 1, so do the same here. Signed-off-by: Thomas Jarosch Link: http://lkml.kernel.org/r/50D9D30B.8090002@intra2net.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/sysfs.c b/tools/perf/util/sysfs.c index 48c6902e749f..f71e9eafe15a 100644 --- a/tools/perf/util/sysfs.c +++ b/tools/perf/util/sysfs.c @@ -8,7 +8,7 @@ static const char * const sysfs_known_mountpoints[] = { }; static int sysfs_found; -char sysfs_mountpoint[PATH_MAX]; +char sysfs_mountpoint[PATH_MAX + 1]; static int sysfs_valid_mountpoint(const char *sysfs) { -- cgit v1.2.3-59-g8ed1b From eec185ab603fab3b7b5021e840cf4b282c4c7e6b Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 28 Dec 2012 16:16:49 +0900 Subject: perf symbols: Include elf.h header regardless of LIBELF_SUPPORT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The elf.h header file is used for the NO_LIBELF build too, so it should be included anyway. Also remove the duplicated include of the header file in symbol-*.c.
This patch fixes the following build error on a NO_LIBELF build: CC tests/hists_link.o tests/hists_link.c: In function ‘setup_fake_machine’: tests/hists_link.c:132:8: error: ‘STB_GLOBAL’ undeclared (first use in this function) tests/hists_link.c:132:8: note: each undeclared identifier is reported only once for each function it appears in Signed-off-by: Namhyung Kim Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1356679009-32122-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 3 --- tools/perf/util/symbol-minimal.c | 1 - tools/perf/util/symbol.h | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index f63557b59c06..54efcb5659ac 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1,6 +1,3 @@ -#include -#include -#include #include #include #include diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index 259f8f2ea9c9..a7390cde63bc 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -1,6 +1,5 @@ #include "symbol.h" -#include #include #include #include diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ec7b2405c377..d97377ac2f16 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -16,8 +16,8 @@ #ifdef LIBELF_SUPPORT #include #include -#include #endif +#include #include "dso.h" -- cgit v1.2.3-59-g8ed1b From 9b494ea2f5638184bc203a30062b32b9a9a05d9e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 8 Jan 2013 18:39:26 +0900 Subject: perf bench: Flush stdout before starting bench suite perf bench prints a header message for the bench suite before starting the benchmark. However, if stdout is redirected to a file and the bench suite forks child processes, this message (and possibly other debugging messages too) will be repeated multiple times. $ perf bench sched messaging # Running sched/messaging benchmark... # 20 sender and receiver processes per group # 10 groups == 400 processes run Total time: 0.100 [sec] $ perf bench sched messaging > result.txt $ wc -l result.txt 391 In this file, there were many duplicate "Running sched/messaging benchmark..." lines. This was because stdout became fully buffered due to the redirection and the buffer was inherited by the forked child processes. The remaining lines are printed after reaping all those tasks. So fix it by flushing stdout before starting the bench suites.
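The effect is easy to reproduce outside of perf; a minimal standalone C sketch of the same buffering behavior (a POSIX demonstration, not the perf code itself):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	printf("# Running sched/messaging benchmark...\n");
	fflush(stdout);	/* drop this line and redirect stdout to a file:
			 * the header above is then printed once per child */

	for (int i = 0; i < 3; i++) {
		if (fork() == 0)
			exit(0);	/* exit() flushes the child's copy of
					 * the inherited, still-full buffer */
	}
	while (wait(NULL) > 0)
		;
	return 0;
}

With the fflush() in place the buffer is already empty at fork() time, so the redirection no longer multiplies the header.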
Signed-off-by: Namhyung Kim Acked-by: Hitoshi Mitake Cc: Hitoshi Mitake Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1357637966-8216-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-bench.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index cae9a5fd2ecf..afd1255a632f 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -159,6 +159,7 @@ static void all_suite(struct bench_subsys *subsys) /* FROM HERE */ printf("# Running %s/%s benchmark...\n", subsys->name, suites[i].name); + fflush(stdout); argv[1] = suites[i].name; suites[i].fn(1, argv, NULL); @@ -225,6 +226,7 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) printf("# Running %s/%s benchmark...\n", subsystems[i].name, subsystems[i].suites[j].name); + fflush(stdout); status = subsystems[i].suites[j].fn(argc - 1, argv + 1, prefix); goto end; -- cgit v1.2.3-59-g8ed1b From d0528b5d71faf612014dd7672e44225c915344b2 Mon Sep 17 00:00:00 2001 From: Joshua Zhu Date: Sat, 5 Jan 2013 13:29:57 +0800 Subject: perf tools: Add anonymous huge page recognition Judging anonymous memory's vm_area_struct, perf_mmap_event's filename will be set to "//anon" indicating this vma belongs to anonymous memory. Once hugepage is used, vma's vm_file points to hugetlbfs. In this way, this vma will not be regarded as anonymous memory by is_anon_memory() in perf user space utility. Signed-off-by: Joshua Zhu Cc: Akihiro Nagai Cc: Andi Kleen Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Joshua Zhu Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1357363797-3550-1-git-send-email-zhu.wen-jie@hp.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 0328d45c4f2a..ff94425779a2 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -19,7 +19,8 @@ const char *map_type__name[MAP__NR_TYPES] = { static inline int is_anon_memory(const char *filename) { - return strcmp(filename, "//anon") == 0; + return !strcmp(filename, "//anon") || + !strcmp(filename, "/anon_hugepage (deleted)"); } static inline int is_no_dso_memory(const char *filename) -- cgit v1.2.3-59-g8ed1b From b878e7fb22ea48b0585bbbbef249f7efc6d42748 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Tue, 8 Jan 2013 14:44:25 -0500 Subject: perf: Missing field in PERF_RECORD_SAMPLE documentation While trying to write a perf_event/mmap test for my perf_event test-suite I came across a missing field description in the PERF_RECORD_SAMPLE documentation in perf_event.h Signed-off-by: Vince Weaver Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1301081439300.24507@vincent-weaver-1.um.maine.edu Signed-off-by: Arnaldo Carvalho de Melo --- include/uapi/linux/perf_event.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4f63c05d27c9..9fa9c622a7f4 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -579,7 +579,8 @@ enum perf_event_type { * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW * - * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK + * { u64 nr; + * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi 
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER -- cgit v1.2.3-59-g8ed1b From 7ce28b5b5b320e26ac6a0e352d5005bce3530e05 Mon Sep 17 00:00:00 2001 From: Hyeoncheol Lee Date: Tue, 11 Sep 2012 16:57:28 +0900 Subject: perf probe: Allow casting an array of char to string Before casting the type of a variable to a string, convert_variable_type() confirms that the type is a pointer or an array. Then, if it is a pointer to char, it is cast to a string, but in the case of an array of char, it isn't. Signed-off-by: H.C. Lee Acked-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/CANFS6bb75e8a_UtyAD9yF73hfXDy0N8tSjDz=a+Vna=Y8ORMHg@mail.gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-finder.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 1daf5c14e751..be0329394d56 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -413,12 +413,12 @@ static int convert_variable_type(Dwarf_Die *vr_die, dwarf_diename(vr_die), dwarf_diename(&type)); return -EINVAL; } + if (die_get_real_type(&type, &type) == NULL) { + pr_warning("Failed to get a type" + " information.\n"); + return -ENOENT; + } if (ret == DW_TAG_pointer_type) { - if (die_get_real_type(&type, &type) == NULL) { - pr_warning("Failed to get a type" - " information.\n"); - return -ENOENT; - } while (*ref_ptr) ref_ptr = &(*ref_ptr)->next; /* Add new reference with offset +0 */ -- cgit v1.2.3-59-g8ed1b From 14d1ac7429d104b09d65c72fd215e1cffd5c7eba Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:38 +0900 Subject: perf sort: Move misplaced sort entry functions Some functions are misplaced among other entries. Move them to the right place so that they can be found together with related functions. No functional change intended. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 119 +++++++++++++++++++++++++------------------------ 1 file changed, 60 insertions(+), 59 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index dbaded90312c..ce5d40456c07 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -97,6 +97,16 @@ static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); } +struct sort_entry sort_comm = { + .se_header = "Command", + .se_cmp = sort__comm_cmp, + .se_collapse = sort__comm_collapse, + .se_snprintf = hist_entry__comm_snprintf, + .se_width_idx = HISTC_COMM, +}; + +/* --sort dso */ + static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) { struct dso *dso_l = map_l ?
map_l->dso : NULL; @@ -117,22 +127,38 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) return strcmp(dso_name_l, dso_name_r); } -struct sort_entry sort_comm = { - .se_header = "Command", - .se_cmp = sort__comm_cmp, - .se_collapse = sort__comm_collapse, - .se_snprintf = hist_entry__comm_snprintf, - .se_width_idx = HISTC_COMM, -}; - -/* --sort dso */ - static int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { return _sort__dso_cmp(left->ms.map, right->ms.map); } +static int _hist_entry__dso_snprintf(struct map *map, char *bf, + size_t size, unsigned int width) +{ + if (map && map->dso) { + const char *dso_name = !verbose ? map->dso->short_name : + map->dso->long_name; + return repsep_snprintf(bf, size, "%-*s", width, dso_name); + } + + return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); +} + +static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, + size_t size, unsigned int width) +{ + return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); +} + +struct sort_entry sort_dso = { + .se_header = "Shared Object", + .se_cmp = sort__dso_cmp, + .se_snprintf = hist_entry__dso_snprintf, + .se_width_idx = HISTC_DSO, +}; + +/* --sort symbol */ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, u64 ip_l, u64 ip_r) @@ -149,22 +175,24 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, return (int64_t)(ip_r - ip_l); } -static int _hist_entry__dso_snprintf(struct map *map, char *bf, - size_t size, unsigned int width) +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { - if (map && map->dso) { - const char *dso_name = !verbose ? map->dso->short_name : - map->dso->long_name; - return repsep_snprintf(bf, size, "%-*s", width, dso_name); - } + u64 ip_l, ip_r; - return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); -} + if (!left->ms.sym && !right->ms.sym) + return right->level - left->level; -static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width) -{ - return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); + if (!left->ms.sym || !right->ms.sym) + return cmp_null(left->ms.sym, right->ms.sym); + + if (left->ms.sym == right->ms.sym) + return 0; + + ip_l = left->ms.sym->start; + ip_r = right->ms.sym->start; + + return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); } static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, @@ -195,14 +223,6 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, return ret; } - -struct sort_entry sort_dso = { - .se_header = "Shared Object", - .se_cmp = sort__dso_cmp, - .se_snprintf = hist_entry__dso_snprintf, - .se_width_idx = HISTC_DSO, -}; - static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width __maybe_unused) @@ -211,27 +231,6 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, self->level, bf, size, width); } -/* --sort symbol */ -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (!left->ms.sym && !right->ms.sym) - return right->level - left->level; - - if (!left->ms.sym || !right->ms.sym) - return cmp_null(left->ms.sym, right->ms.sym); - - if (left->ms.sym == right->ms.sym) - return 0; - - ip_l = left->ms.sym->start; - ip_r = right->ms.sym->start; - - return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); -} - struct sort_entry sort_sym = { .se_header = "Symbol", .se_cmp 
= sort__sym_cmp, @@ -343,6 +342,8 @@ struct sort_entry sort_cpu = { .se_width_idx = HISTC_CPU, }; +/* sort keys for branch stacks */ + static int64_t sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -357,13 +358,6 @@ static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf, bf, size, width); } -struct sort_entry sort_dso_from = { - .se_header = "Source Shared Object", - .se_cmp = sort__dso_from_cmp, - .se_snprintf = hist_entry__dso_from_snprintf, - .se_width_idx = HISTC_DSO_FROM, -}; - static int64_t sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -423,6 +417,13 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, } +struct sort_entry sort_dso_from = { + .se_header = "Source Shared Object", + .se_cmp = sort__dso_from_cmp, + .se_snprintf = hist_entry__dso_from_snprintf, + .se_width_idx = HISTC_DSO_FROM, +}; + struct sort_entry sort_dso_to = { .se_header = "Target Shared Object", .se_cmp = sort__dso_to_cmp, -- cgit v1.2.3-59-g8ed1b From 433555221b6c8bff4fee2d5ab84e7aea6b1f068e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:39 +0900 Subject: perf sort: Get rid of unnecessary __maybe_unused Some functions have set __maybe_unused on its arguments that are used actually. Remove them. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index ce5d40456c07..3c2836eeafb5 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -197,7 +197,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, u64 ip, char level, char *bf, size_t size, - unsigned int width __maybe_unused) + unsigned int width) { size_t ret = 0; @@ -224,8 +224,7 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, } static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, - size_t size, - unsigned int width __maybe_unused) + size_t size, unsigned int width) { return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, self->level, bf, size, width); @@ -398,8 +397,7 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) } static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, - size_t size, - unsigned int width __maybe_unused) + size_t size, unsigned int width) { struct addr_map_symbol *from = &self->branch_info->from; return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, @@ -408,8 +406,7 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, } static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, - size_t size, - unsigned int width __maybe_unused) + size_t size, unsigned int width) { struct addr_map_symbol *to = &self->branch_info->to; return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, -- cgit v1.2.3-59-g8ed1b From fb29a338b585ebcce793b8e4a6c62440c4574fa7 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:40 +0900 Subject: perf sort: Fix --sort pid output The "pid" sort key prints "Command: Pid" output but it's misaligned. 
It's because an offset of 6 was added to the column length during the calculation in order to reserve space for the Pid part, but it isn't honored when printed. The output before this patch was like this: # Overhead Command: Pid Shared Object # ........ ............. ................. # 99.70% noploop:17814 noploop 0.29% noploop:17814 [kernel.kallsyms] 0.01% noploop:17814 ld-2.15.so Fix it by subtracting 6 when printing the comm part. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-4-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 3c2836eeafb5..8b6d70bd749b 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -60,7 +60,7 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%*s:%5d", width, + return repsep_snprintf(bf, size, "%*s:%5d", width - 6, self->thread->comm ?: "", self->thread->pid); } -- cgit v1.2.3-59-g8ed1b From dccf180542c4e27621ea526769014dc97b9e5676 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:41 +0900 Subject: perf sort: Align cpu column to right Since the cpu number is a natural number, it'd be more appropriate to align it to the right. Before: # Overhead CPU Command: Pid Shared Object # ........ ... ................. .....................
# 8.91% 8 gnome-shell: 1497 perf-1497.map 8.90% 7 gnome-shell: 1497 perf-1497.map 8.86% 9 gnome-shell: 1497 perf-1497.map 8.83% 6 gnome-shell: 1497 perf-1497.map 8.81% 10 gnome-shell: 1497 perf-1497.map 7.44% 5 gnome-shell: 1497 perf-1497.map 6.20% 3 gnome-shell: 1497 perf-1497.map 5.10% 0 gnome-shell: 1497 perf-1497.map Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-5-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 8b6d70bd749b..a36051b34901 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -331,7 +331,7 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*d", width, self->cpu); + return repsep_snprintf(bf, size, "%*d", width, self->cpu); } struct sort_entry sort_cpu = { -- cgit v1.2.3-59-g8ed1b From cb993744554b0a5bd5c46239544cec9fd252a106 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:42 +0900 Subject: perf sort: Calculate parent column width too When hists__calc_col_len() is called, most of the column lengths are refreshed, but it missed the parent column. So if the parent sort key was used along with other keys, the rest would be misaligned since the parent column had no proper width. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-6-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 965ebf948f2f..9485c7024f5b 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -82,6 +82,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) hists__new_col_len(hists, HISTC_DSO, len); } + if (h->parent) + hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); + if (h->branch_info) { int symlen; /* -- cgit v1.2.3-59-g8ed1b From 6f38cf25a6af5b21da1d52a94d9f56f5af2a215b Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:45 +0900 Subject: perf sort: Clean up sort__first_dimension setting It doesn't need to compare against every sort key name since the index already has the required information.
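The same idiom in isolation (an illustrative C sketch, not the perf code): when the dimension table is indexed by the same enum that sort__first_dimension takes, the loop index already is the value being sought.

#include <string.h>

enum sort_type { SORT_PID, SORT_COMM, SORT_DSO };

static const char * const sort_names[] = { "pid", "comm", "dso" };
static int first_dimension = -1;

static void add_sort_key(const char *tok)
{
	for (unsigned int i = 0; i < sizeof(sort_names) / sizeof(sort_names[0]); i++) {
		if (strcmp(tok, sort_names[i]))
			continue;
		if (first_dimension < 0)
			first_dimension = i;	/* the index doubles as the enum value */
		return;
	}
}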
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-9-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index a36051b34901..39b1ab0967f4 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -526,30 +526,8 @@ int sort_dimension__add(const char *tok) if (sd->entry->se_collapse) sort__need_collapse = 1; - if (list_empty(&hist_entry__sort_list)) { - if (!strcmp(sd->name, "pid")) - sort__first_dimension = SORT_PID; - else if (!strcmp(sd->name, "comm")) - sort__first_dimension = SORT_COMM; - else if (!strcmp(sd->name, "dso")) - sort__first_dimension = SORT_DSO; - else if (!strcmp(sd->name, "symbol")) - sort__first_dimension = SORT_SYM; - else if (!strcmp(sd->name, "parent")) - sort__first_dimension = SORT_PARENT; - else if (!strcmp(sd->name, "cpu")) - sort__first_dimension = SORT_CPU; - else if (!strcmp(sd->name, "symbol_from")) - sort__first_dimension = SORT_SYM_FROM; - else if (!strcmp(sd->name, "symbol_to")) - sort__first_dimension = SORT_SYM_TO; - else if (!strcmp(sd->name, "dso_from")) - sort__first_dimension = SORT_DSO_FROM; - else if (!strcmp(sd->name, "dso_to")) - sort__first_dimension = SORT_DSO_TO; - else if (!strcmp(sd->name, "mispredict")) - sort__first_dimension = SORT_MISPREDICT; - } + if (list_empty(&hist_entry__sort_list)) + sort__first_dimension = i; list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; -- cgit v1.2.3-59-g8ed1b From fc5871ed0dcf8c76dd1b3b36ff0f70112d2f0e74 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:46 +0900 Subject: perf sort: Separate out branch stack specific sort keys Currently perf report gets a segmentation fault when a branch stack specific sort key is provided via the --sort option for a perf.data file which contains no branch information. It's because those sort keys reference the branch info of a hist entry unconditionally. Maybe we could change it to check whether such branch info is valid or not, but if the branch stacks are not recorded it'd be a nop. Thus it'd be better to make those keys unselectable. This patch separates those keys into a different dimension array, so that a user who passes such a key for a file which has no branch stack will get the following message rather than a segfault.
Error: Invalid --sort key: `symbol_from' Signed-off-by: Namhyung Kim Reported-by: Stefan Beller Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-10-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 64 ++++++++++++++++++++++++++++++++++++++++---------- tools/perf/util/sort.h | 8 +++++-- 2 files changed, 58 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 39b1ab0967f4..7ad62393aa88 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -480,30 +480,40 @@ struct sort_dimension { #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } -static struct sort_dimension sort_dimensions[] = { +static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_PID, "pid", sort_thread), DIM(SORT_COMM, "comm", sort_comm), DIM(SORT_DSO, "dso", sort_dso), - DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), - DIM(SORT_DSO_TO, "dso_to", sort_dso_to), DIM(SORT_SYM, "symbol", sort_sym), - DIM(SORT_SYM_TO, "symbol_from", sort_sym_from), - DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to), DIM(SORT_PARENT, "parent", sort_parent), DIM(SORT_CPU, "cpu", sort_cpu), - DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), DIM(SORT_SRCLINE, "srcline", sort_srcline), }; +#undef DIM + +#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } + +static struct sort_dimension bstack_sort_dimensions[] = { + DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), + DIM(SORT_DSO_TO, "dso_to", sort_dso_to), + DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), + DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), + DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), +}; + +#undef DIM + int sort_dimension__add(const char *tok) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { + struct sort_dimension *sd = &common_sort_dimensions[i]; if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sd->entry == &sort_parent) { int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); if (ret) { @@ -514,9 +524,7 @@ int sort_dimension__add(const char *tok) return -EINVAL; } sort__has_parent = 1; - } else if (sd->entry == &sort_sym || - sd->entry == &sort_sym_from || - sd->entry == &sort_sym_to) { + } else if (sd->entry == &sort_sym) { sort__has_sym = 1; } @@ -534,6 +542,34 @@ int sort_dimension__add(const char *tok) return 0; } + + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { + struct sort_dimension *sd = &bstack_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sort__branch_mode != 1) + return -EINVAL; + + if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) + sort__has_sym = 1; + + if (sd->taken) + return 0; + + if (sd->entry->se_collapse) + sort__need_collapse = 1; + + if (list_empty(&hist_entry__sort_list)) + sort__first_dimension = i + __SORT_BRANCH_STACK; + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + return -ESRCH; } @@ -543,7 +579,11 @@ void setup_sorting(const char * const usagestr[], const struct option *opts) for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { - if (sort_dimension__add(tok) < 0) { + int ret = sort_dimension__add(tok); + if (ret == -EINVAL) { + error("Invalid --sort key: `%s'", tok); + 
usage_with_options(usagestr, opts); + } else if (ret == -ESRCH) { error("Unknown --sort key: `%s'", tok); usage_with_options(usagestr, opts); } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index a1c0d56b6885..e994ad3e9897 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -122,18 +122,22 @@ static inline void hist_entry__add_pair(struct hist_entry *he, } enum sort_type { + /* common sort keys */ SORT_PID, SORT_COMM, SORT_DSO, SORT_SYM, SORT_PARENT, SORT_CPU, - SORT_DSO_FROM, + SORT_SRCLINE, + + /* branch stack specific sort keys */ + __SORT_BRANCH_STACK, + SORT_DSO_FROM = __SORT_BRANCH_STACK, SORT_DSO_TO, SORT_SYM_FROM, SORT_SYM_TO, SORT_MISPREDICT, - SORT_SRCLINE, }; /* -- cgit v1.2.3-59-g8ed1b From 9811360ec8b76a68599cb0629cebca026c93cfce Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 18:11:47 +0900 Subject: perf report: Update documentation for sort keys Add description of sort keys to the perf-report document and also add missing cpu and srcline keys to the command line help string. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1356599507-14226-11-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-report.txt | 38 +++++++++++++++++++++++++++++--- tools/perf/builtin-report.c | 4 ++-- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index f4d91bebd59d..848a0dcb6dfd 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -57,11 +57,44 @@ OPTIONS -s:: --sort=:: - Sort by key(s): pid, comm, dso, symbol, parent, srcline. + Sort histogram entries by given key(s) - multiple keys can be specified + in CSV format. Following sort keys are available: + pid, comm, dso, symbol, parent, cpu, srcline. + + Each key has following meaning: + + - comm: command (name) of the task which can be read via /proc//comm + - pid: command and tid of the task + - dso: name of library or module executed at the time of sample + - symbol: name of function executed at the time of sample + - parent: name of function matched to the parent regex filter. Unmatched + entries are displayed as "[other]". + - cpu: cpu number the task ran at the time of sample + - srcline: filename and line number executed at the time of sample. The + DWARF debugging info must be provided. + + By default, comm, dso and symbol keys are used. + (i.e. --sort comm,dso,symbol) + + If --branch-stack option is used, following sort keys are also + available: + dso_from, dso_to, symbol_from, symbol_to, mispredict. + + - dso_from: name of library or module branched from + - dso_to: name of library or module branched to + - symbol_from: name of function branched from + - symbol_to: name of function branched to + - mispredict: "N" for predicted branch, "Y" for mispredicted branch + + And default sort keys are changed to comm, dso_from, symbol_from, dso_to + and symbol_to, see '--branch-stack'. -p:: --parent=:: - regex filter to identify parent, see: '--sort parent' + A regex filter to identify parent. The parent is a caller of this + function and searched through the callchain, thus it requires callchain + information recorded. The pattern is in the extended regex format and + defaults to "\^sys_|^do_page_fault", see '--sort parent'.
-x:: --exclude-other:: @@ -74,7 +107,6 @@ OPTIONS -t:: --field-separator=:: - Use a special separator character and don't pad with spaces, replacing all occurrences of this separator in symbol names (and other output) with a '.' character, that thus it's the only non valid separator. diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 13cdf61c4f82..47a864478543 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -595,8 +595,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN(0, "stdio", &report.use_stdio, "Use the stdio interface"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol, parent, dso_to," - " dso_from, symbol_to, symbol_from, mispredict"), + "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline," + " dso_to, dso_from, symbol_to, symbol_from, mispredict"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu modes"), OPT_STRING('p', "parent", &parent_pattern, "regex", -- cgit v1.2.3-59-g8ed1b From 164c800e42657e586f6fd446b3d8d41cf1f815f1 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 14 Jan 2013 10:46:47 -0700 Subject: perf symbols: Move name malloc to when needed in dso__load Memory is currently leaked on some paths. Signed-off-by: David Ahern Link: http://lkml.kernel.org/r/1358185607-90799-1-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 2960284d6ae1..daf95549e7b9 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -768,10 +768,6 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) else machine = NULL; - name = malloc(PATH_MAX); - if (!name) - return -1; - dso->adjust_symbols = 0; if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { @@ -795,6 +791,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (machine) root_dir = machine->root_dir; + name = malloc(PATH_MAX); + if (!name) + return -1; + /* Iterate over candidate debug images. * Keep track of "interesting" ones (those which have a symtab, dynsym, * and/or opd section) for processing. -- cgit v1.2.3-59-g8ed1b From b7c14a0b6a4e3add92e1527a31cb1826f4799248 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 14 Jan 2013 10:47:30 -0700 Subject: perf symbols: Mark vmlinux filename as allocated Needs to be marked allocated so memory can be freed when dso is deleted. 
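The ownership rule being maintained, as a simplified C sketch (the field names mirror the dso ones, but the struct and helper are illustrative):

#include <stdlib.h>

struct obj {
	char *long_name;
	unsigned int lname_alloc:1;	/* 1 iff long_name is heap-allocated */
};

static void obj__delete(struct obj *obj)
{
	if (obj->lname_alloc)
		free(obj->long_name);	/* free only what was strdup()'d */
	free(obj);
}

Without the flag, the destructor must either leak heap-allocated names or free() statically-owned ones, both of which are wrong.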
Signed-off-by: David Ahern Link: http://lkml.kernel.org/r/1358185650-90848-1-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index daf95549e7b9..e6432d85b43d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -923,8 +923,10 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, filename = dso__build_id_filename(dso, NULL, 0); if (filename != NULL) { err = dso__load_vmlinux(dso, map, filename, filter); - if (err > 0) + if (err > 0) { + dso->lname_alloc = 1; goto out; + } free(filename); } @@ -932,6 +934,7 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); if (err > 0) { dso__set_long_name(dso, strdup(vmlinux_path[i])); + dso->lname_alloc = 1; break; } } @@ -971,6 +974,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, if (err > 0) { dso__set_long_name(dso, strdup(symbol_conf.vmlinux_name)); + dso->lname_alloc = 1; goto out_fixup; } return err; -- cgit v1.2.3-59-g8ed1b From 2c803e5248d038988ec7c52e8fd7c83130dd3c13 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 14 Jan 2013 10:48:01 -0700 Subject: perf tools: Move get_term_dimensions from top to util.c It is used by util/help.c so it should be a lib function and included in libperf.a. Code move only. Signed-off-by: David Ahern Link: http://lkml.kernel.org/r/1358185681-90926-1-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 22 ---------------------- tools/perf/perf.h | 4 ---- tools/perf/util/util.c | 22 ++++++++++++++++++++++ tools/perf/util/util.h | 3 +++ 4 files changed, 25 insertions(+), 26 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index bc788126397b..e05ba817f8ce 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -68,28 +68,6 @@ #include #include -void get_term_dimensions(struct winsize *ws) -{ - char *s = getenv("LINES"); - - if (s != NULL) { - ws->ws_row = atoi(s); - s = getenv("COLUMNS"); - if (s != NULL) { - ws->ws_col = atoi(s); - if (ws->ws_row && ws->ws_col) - return; - } - } -#ifdef TIOCGWINSZ - if (ioctl(1, TIOCGWINSZ, ws) == 0 && - ws->ws_row && ws->ws_col) - return; -#endif - ws->ws_row = 25; - ws->ws_col = 80; -} - static void perf_top__update_print_entries(struct perf_top *top) { if (top->print_entries > 9) diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 7622f15bcfb7..8f3bf388e414 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -1,10 +1,6 @@ #ifndef _PERF_PERF_H #define _PERF_PERF_H -struct winsize; - -void get_term_dimensions(struct winsize *ws); - #include #if defined(__i386__) diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 252b889ac8c2..805d1f52c5b4 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -220,3 +220,25 @@ void dump_stack(void) #else void dump_stack(void) {} #endif + +void get_term_dimensions(struct winsize *ws) +{ + char *s = getenv("LINES"); + + if (s != NULL) { + ws->ws_row = atoi(s); + s = getenv("COLUMNS"); + if (s != NULL) { + ws->ws_col = atoi(s); + if (ws->ws_row && ws->ws_col) + return; + } + } +#ifdef TIOCGWINSZ + if (ioctl(1, TIOCGWINSZ, ws) == 0 && + ws->ws_row && ws->ws_col) + return; +#endif + ws->ws_row = 25; + ws->ws_col = 80; +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index c2330918110c..ec5de5e3330c 100644 
--- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -271,4 +271,7 @@ void dump_stack(void); extern unsigned int page_size; +struct winsize; +void get_term_dimensions(struct winsize *ws); + #endif -- cgit v1.2.3-59-g8ed1b From 865c66c4183aab1d12522ca3ad3a3339d2437e5c Mon Sep 17 00:00:00 2001 From: Frederik Deweerdt Date: Mon, 14 Jan 2013 14:47:17 -0500 Subject: perf annotate browser: Fix segfault when drawing out-of-bounds jumps Factorize jump sanity checks from mark_jump_targets() and draw_current_jump() in an is_valid_jump() function. This fixes a segfault when moving the cursor over an invalid jump. Signed-off-by: Frederik Deweerdt Cc: Namhyung Kim Link: http://lkml.kernel.org/r/20130114194716.GA4973@ks398093.ip-192-95-24.net [ committer note: Make it a disasm_line method ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 5dab3ca96980..2fc7f04691cf 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -182,6 +182,16 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int ab->selection = dl; } +static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym) +{ + if (!dl || !dl->ins || !ins__is_jump(dl->ins) + || !disasm_line__has_offset(dl) + || dl->ops.target.offset >= symbol__size(sym)) + return false; + + return true; +} + static void annotate_browser__draw_current_jump(struct ui_browser *browser) { struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); @@ -195,8 +205,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) if (strstr(sym->name, "@plt")) return; - if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) || - !disasm_line__has_offset(cursor)) + if (!disasm_line__is_valid_jump(cursor, sym)) return; target = ab->offsets[cursor->ops.target.offset]; @@ -788,17 +797,9 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser struct disasm_line *dl = browser->offsets[offset], *dlt; struct browser_disasm_line *bdlt; - if (!dl || !dl->ins || !ins__is_jump(dl->ins) || - !disasm_line__has_offset(dl)) + if (!disasm_line__is_valid_jump(dl, sym)) continue; - if (dl->ops.target.offset >= size) { - ui__error("jump to after symbol!\n" - "size: %zx, jump target: %" PRIx64, - size, dl->ops.target.offset); - continue; - } - dlt = browser->offsets[dl->ops.target.offset]; /* * FIXME: Oops, no jump target? Buggy disassembler? Or do we @@ -921,7 +922,7 @@ out_free_offsets: #define ANNOTATE_CFG(n) \ { .name = #n, .value = &annotate_browser__opts.n, } - + /* * Keep the entries sorted, they are bsearch'ed */ -- cgit v1.2.3-59-g8ed1b From 3cf0cb1f899640f1eb8b3984739cfd70375c9c36 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 14 Jan 2013 15:02:45 +0100 Subject: perf tools: Mark branch_info maps as referenced As noticed by Jiri, the hist_entry->branch_info.to/from maps need to be marked as referenced to avoid problems later on. So we do this when the hist_entry is allocated. 
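The invariant, as a simplified C sketch (the types are illustrative): a later map purge frees only maps left unreferenced, so everything reachable from a live hist_entry must be pinned when the entry is created.

#include <stdbool.h>
#include <stddef.h>

struct map { bool referenced; };

struct branch_info {
	struct { struct map *map; } from, to;
};

/* Called at hist_entry allocation time: pin both branch maps so a
 * subsequent map purge cannot free memory the entry still points at. */
static void pin_branch_maps(struct branch_info *bi)
{
	if (bi == NULL)
		return;
	if (bi->from.map)
		bi->from.map->referenced = true;
	if (bi->to.map)
		bi->to.map->referenced = true;
}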
Signed-off-by: Stephane Eranian Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20130114140245.GA4692@quad Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 9485c7024f5b..8170a3d11ffa 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -245,6 +245,14 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template) if (he->ms.map) he->ms.map->referenced = true; + + if (he->branch_info) { + if (he->branch_info->from.map) + he->branch_info->from.map->referenced = true; + if (he->branch_info->to.map) + he->branch_info->to.map->referenced = true; + } + if (symbol_conf.use_callchain) callchain_init(he->callchain); -- cgit v1.2.3-59-g8ed1b From d8f7bbc947afb59c68a8574d1fe99b20cff2b1be Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 15 Jan 2013 14:39:51 +0100 Subject: perf tools: Remove unused 'unset' parameter from parse_events The 'unset' parameter is option callback leftover with no use, removing. Signed-off-by: Jiri Olsa Cc: Andi Kleen Cc: Corey Ashford Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Ulrich Drepper Link: http://lkml.kernel.org/r/1358257194-8204-2-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/evsel-roundtrip-name.c | 4 ++-- tools/perf/tests/hists_link.c | 4 ++-- tools/perf/tests/parse-events.c | 2 +- tools/perf/util/parse-events.c | 5 ++--- tools/perf/util/parse-events.h | 3 +-- 5 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c index e61fc828a158..0fd99a9adb91 100644 --- a/tools/perf/tests/evsel-roundtrip-name.c +++ b/tools/perf/tests/evsel-roundtrip-name.c @@ -22,7 +22,7 @@ static int perf_evsel__roundtrip_cache_name_test(void) for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { __perf_evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); - err = parse_events(evlist, name, 0); + err = parse_events(evlist, name); if (err) ret = err; } @@ -70,7 +70,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names) return -ENOMEM; for (i = 0; i < nr_names; ++i) { - err = parse_events(evlist, names[i], 0); + err = parse_events(evlist, names[i]); if (err) { pr_debug("failed to parse event '%s', err %d\n", names[i], err); diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c index 27860a082381..0afd9223bde7 100644 --- a/tools/perf/tests/hists_link.c +++ b/tools/perf/tests/hists_link.c @@ -441,10 +441,10 @@ int test__hists_link(void) if (evlist == NULL) return -ENOMEM; - err = parse_events(evlist, "cpu-clock", 0); + err = parse_events(evlist, "cpu-clock"); if (err) goto out; - err = parse_events(evlist, "task-clock", 0); + err = parse_events(evlist, "task-clock"); if (err) goto out; diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index e7eb708da32c..337424d02e53 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1018,7 +1018,7 @@ static int test_event(struct test__event_st *e) if (evlist == NULL) return -ENOMEM; - ret = parse_events(evlist, e->name, 0); + ret = parse_events(evlist, e->name); if (ret) { pr_debug("failed to parse event '%s', err %d\n", e->name, ret); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c 
index 626c120f00da..d3bf570a7084 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -872,8 +872,7 @@ int parse_events_terms(struct list_head *terms, const char *str) return ret; } -int parse_events(struct perf_evlist *evlist, const char *str, - int unset __maybe_unused) +int parse_events(struct perf_evlist *evlist, const char *str) { struct parse_events_data__events data = { .list = LIST_HEAD_INIT(data.list), @@ -900,7 +899,7 @@ int parse_events_option(const struct option *opt, const char *str, int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; - int ret = parse_events(evlist, str, unset); + int ret = parse_events(evlist, str); if (ret) { fprintf(stderr, "invalid or unsupported event: '%s'\n", str); diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index b7af80b8bdda..7c5244fa0885 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -29,8 +29,7 @@ const char *event_type(int type); extern int parse_events_option(const struct option *opt, const char *str, int unset); -extern int parse_events(struct perf_evlist *evlist, const char *str, - int unset); +extern int parse_events(struct perf_evlist *evlist, const char *str); extern int parse_events_terms(struct list_head *terms, const char *str); extern int parse_filter(const struct option *opt, const char *str, int unset); -- cgit v1.2.3-59-g8ed1b From ff582680f8762293aa45eb5cf2faacadbe73b435 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 15 Jan 2013 17:02:19 +0900 Subject: tools lib traceevent: Fix warning on '>=' operator Although the '>=' (and '<=') operator is handled properly in libtraceevent, it emitted the following spurious warnings on perf test: $ perf test 5: parse events tests : ... Warning: unknown op '>=' Warning: unknown op '>=' Warning: unknown op '>=' Warning: unknown op '>=' Warning: unknown op '>=' Warning: unknown op '>=' ... Add the operator to the checks. Signed-off-by: Namhyung Kim Acked-by: Steven Rostedt Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1358236939-17393-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/event-parse.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index dc42e55469d1..f5046191d086 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1784,6 +1784,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) strcmp(token, "/") == 0 || strcmp(token, "<") == 0 || strcmp(token, ">") == 0 || + strcmp(token, "<=") == 0 || + strcmp(token, ">=") == 0 || strcmp(token, "==") == 0 || strcmp(token, "!=") == 0) { -- cgit v1.2.3-59-g8ed1b From 1aa3d1780f518080e6a51a5288cd05fb4b34d82c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 16 Jan 2013 20:59:54 +0900 Subject: perf tools: Get rid of unused include of config.mak These lines came from the GIT Makefile and were never used for perf. I found this in the make -d output while working on the previous patch. Updating makefiles.... Considering target file `arch/x86/Makefile'. No need to remake target `arch/x86/Makefile'. Considering target file `config.mak'. File `config.mak' does not exist. Must remake target `config.mak'. Failed to remake target file `config.mak'. Considering target file `config.mak.autogen'. File `config.mak.autogen' does not exist.
Must remake target `config.mak.autogen'. Failed to remake target file `config.mak.autogen'. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1358337594-10916-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index e18163bb32d7..6aef6d00e3c2 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -534,9 +534,6 @@ ifneq ($(MAKECMDGOALS),tags) # because maintaining the nesting to match is a pain. If # we had "elif" things would have been much nicer... --include config.mak.autogen --include config.mak - ifdef NO_LIBELF NO_DWARF := 1 NO_DEMANGLE := 1 -- cgit v1.2.3-59-g8ed1b From 3cecaa2002273887a9364c454684fa8491bb2b10 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 16 Jan 2013 20:59:53 +0900 Subject: perf tools: Do not include PERF-VERSION-FILE to Makefile When make runs, it tries to update the Makefile rules by reading all of the included Makefiles. During the perf build it checks PERF-VERSION-FILE to get the current version number, but this triggers a Makefile update, so make runs again with the updated Makefile and, in turn, users see duplicate CHK messages on the second pass. Running make with the -d option for debugging shows this: GNU Make 3.82 Built for x86_64-redhat-linux-gnu Copyright (C) 2010 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Reading makefiles... Reading makefile `Makefile'... Reading makefile `../scripts/Makefile.include' (search path) (no ~ expansion)... Reading makefile `config/utilities.mak' (search path) (no ~ expansion)... Reading makefile `PERF-VERSION-FILE' (search path) (don't care) (no ~ expansion)... Reading makefile `config/feature-tests.mak' (search path) (don't care) (no ~ expansion)... CHK -fstack-protector-all CHK -Wstack-protector CHK -Wvolatile-register-var ... Updating makefiles.... Considering target file `PERF-VERSION-FILE'. Must remake target `PERF-VERSION-FILE'. Invoking recipe from Makefile:52 to update target `PERF-VERSION-FILE'. Putting child 0x14037a0 (PERF-VERSION-FILE) PID 31925 on the chain. Live child 0x14037a0 (PERF-VERSION-FILE) PID 31925 PERF_VERSION = 3.8.rc3.gf751db6 Reaping winning child 0x14037a0 PID 31925 Removing child 0x14037a0 PID 31925 from chain. Successfully remade target file `PERF-VERSION-FILE'. ... Re-executing[1]: make -d <------------ here GNU Make 3.82 Built for x86_64-redhat-linux-gnu Copyright (C) 2010 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Reading makefiles... Reading makefile `Makefile'... Reading makefile `../scripts/Makefile.include' (search path) (no ~ expansion)... Reading makefile `config/utilities.mak' (search path) (no ~ expansion)... Reading makefile `PERF-VERSION-FILE' (search path) (don't care) (no ~ expansion)... Reading makefile `config/feature-tests.mak' (search path) (don't care) (no ~ expansion)... CHK -fstack-protector-all CHK -Wstack-protector CHK -Wvolatile-register-var ... Actually, PERF-VERSION-FILE is used only by perf.c to #define the PERF_VERSION macro. So making it look like a C header file and including it when compiling perf.c removes the need to include it in the Makefile.
Hence there is no need to update the Makefile, and the CHK lines no longer appear. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1358337594-10916-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 3 +-- tools/perf/util/PERF-VERSION-GEN | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 6aef6d00e3c2..a84021abb3fe 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -50,7 +50,6 @@ include config/utilities.mak $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) --include $(OUTPUT)PERF-VERSION-FILE uname_M := $(shell uname -m 2>/dev/null || echo not) @@ -887,7 +886,7 @@ strip: $(PROGRAMS) $(OUTPUT)perf $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ + $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@ diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN index 6aa34e5afdcf..055fef34b6f6 100755 --- a/tools/perf/util/PERF-VERSION-GEN +++ b/tools/perf/util/PERF-VERSION-GEN @@ -26,13 +26,13 @@ VN=$(expr "$VN" : v*'\(.*\)') if test -r $GVF then - VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) + VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF) else VC=unset fi test "$VN" = "$VC" || { echo >&2 "PERF_VERSION = $VN" - echo "PERF_VERSION = $VN" >$GVF + echo "#define PERF_VERSION \"$VN\"" >$GVF } -- cgit v1.2.3-59-g8ed1b From 15268138e334bd0362f8395edac4822351714a22 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Thu, 17 Jan 2013 09:11:30 -0800 Subject: perf tools: Fix PMU format parsing test failure On POWER, the 'perf format parsing' test always fails. It looks like this is because memset() is being passed the number of longs rather than the number of bytes. It is interesting that the test always passes on my x86 box. With this patch, the test passes on POWER and continues to pass on x86.
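To illustrate, here is a minimal standalone sketch (not part of the patch, assuming an LP64 host where sizeof(long) == 8): BITS_TO_LONGS() yields a count of longs while memset() expects a count of bytes, so the old call cleared only one byte of the bitmap.

    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_BYTE 8
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
    #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)

    int main(void)
    {
            unsigned long bits[BITS_TO_LONGS(64)];

            memset(bits, 0xff, sizeof(bits));
            memset(bits, 0, BITS_TO_LONGS(64)); /* old: clears 1 byte, 7 stay stale */
            printf("old:   %016lx\n", bits[0]);

            memset(bits, 0xff, sizeof(bits));
            memset(bits, 0, BITS_TO_BYTES(64)); /* fixed: clears all 8 bytes */
            printf("fixed: %016lx\n", bits[0]);
            return 0;
    }

Whether the stale bytes are ever noticed depends on what happened to be in the surrounding memory, which would explain the test passing on one architecture while failing on another.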
Signed-off-by: Sukadev Bhattiprolu Acked-by: Jiri Olsa Cc: Anton Blanchard Cc: Jiri Olsa Cc: Paul Mackerras Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130117172814.GA18882@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/include/linux/bitops.h | 1 + tools/perf/util/pmu.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index a55d8cf083c9..45cf10a562bd 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h @@ -14,6 +14,7 @@ #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) +#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 9bdc60c6f138..b93ff146766e 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -548,7 +548,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) if (!to) to = from; - memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS)); + memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS)); for (b = from; b <= to; b++) set_bit(b, bits); } -- cgit v1.2.3-59-g8ed1b From 08aa9cce6bf3ecdfa386cf0a0be842855eac3e7d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:41 +0900 Subject: perf tools: Move ltrim() to util/string.c As we have ltrim() implementation in builtin-script.c move it to the more generic location of util/string.c so that it can be used from other places. Signed-off-by: Namhyung Kim Cc: Feng Tang Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1358845787-1350-14-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 12 ------------ tools/perf/util/string.c | 18 ++++++++++++++++++ tools/perf/util/util.h | 1 + 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 3314ef2ec83e..fee4c9a8aee5 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -909,18 +909,6 @@ static const char *ends_with(const char *str, const char *suffix) return NULL; } -static char *ltrim(char *str) -{ - int len = strlen(str); - - while (len && isspace(*str)) { - len--; - str++; - } - - return str; -} - static int read_script_info(struct script_desc *desc, const char *filename) { char line[BUFSIZ], *p; diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 346707df04b9..29c7b2cb2521 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -331,6 +331,24 @@ char *strxfrchar(char *s, char from, char to) return s; } +/** + * ltrim - Removes leading whitespace from @s. + * @s: The string to be stripped. + * + * Return pointer to the first non-whitespace character in @s. + */ +char *ltrim(char *s) +{ + int len = strlen(s); + + while (len && isspace(*s)) { + len--; + s++; + } + + return s; +} + /** * rtrim - Removes trailing whitespace from @s. * @s: The string to be stripped. 
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index ec5de5e3330c..09b4c26b71aa 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -265,6 +265,7 @@ bool is_power_of_2(unsigned long n) size_t hex_width(u64 v); int hex2u64(const char *ptr, u64 *val); +char *ltrim(char *s); char *rtrim(char *s); void dump_stack(void); -- cgit v1.2.3-59-g8ed1b From 6cee6cd310638cd5751eea0c81315b17eb7c28a9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 18 Jan 2013 16:29:49 -0300 Subject: perf tools: Fix usage of __ in parse_events_term struct In tools/perf we use a convention where __ separates the struct name from the function name for functions that operate on a struct instance. Fix this usage by removing it from the struct parse_events_term and fix also its associated functions. Acked-by: Jiri Olsa Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-h6vkql4jr7dv0096f1s6hldm@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 10 +++++----- tools/perf/tests/pmu.c | 4 ++-- tools/perf/util/parse-events.c | 26 +++++++++++++------------- tools/perf/util/parse-events.h | 14 +++++++------- tools/perf/util/parse-events.y | 38 +++++++++++++++++++------------------- tools/perf/util/pmu.c | 12 ++++++------ 6 files changed, 52 insertions(+), 52 deletions(-) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 337424d02e53..9ac45c58305e 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -464,10 +464,10 @@ static int test__checkevent_pmu_events(struct perf_evlist *evlist) static int test__checkterms_simple(struct list_head *terms) { - struct parse_events__term *term; + struct parse_events_term *term; /* config=10 */ - term = list_entry(terms->next, struct parse_events__term, list); + term = list_entry(terms->next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); TEST_ASSERT_VAL("wrong type val", @@ -476,7 +476,7 @@ static int test__checkterms_simple(struct list_head *terms) TEST_ASSERT_VAL("wrong config", !term->config); /* config1 */ - term = list_entry(term->list.next, struct parse_events__term, list); + term = list_entry(term->list.next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1); TEST_ASSERT_VAL("wrong type val", @@ -485,7 +485,7 @@ static int test__checkterms_simple(struct list_head *terms) TEST_ASSERT_VAL("wrong config", !term->config); /* config2=3 */ - term = list_entry(term->list.next, struct parse_events__term, list); + term = list_entry(term->list.next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2); TEST_ASSERT_VAL("wrong type val", @@ -494,7 +494,7 @@ static int test__checkterms_simple(struct list_head *terms) TEST_ASSERT_VAL("wrong config", !term->config); /* umask=1*/ - term = list_entry(term->list.next, struct parse_events__term, list); + term = list_entry(term->list.next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_USER); TEST_ASSERT_VAL("wrong type val", diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index a5f379863b8f..6803ed1a06dd 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -22,7 +22,7 @@ static 
struct test_format { #define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format)) /* Simulated users input. */ -static struct parse_events__term test_terms[] = { +static struct parse_events_term test_terms[] = { { .config = (char *) "krava01", .val.num = 15, @@ -78,7 +78,7 @@ static struct parse_events__term test_terms[] = { .type_term = PARSE_EVENTS__TERM_TYPE_USER, }, }; -#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term)) +#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events_term)) /* * Prepare format directory data, exported by kernel diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index d3bf570a7084..e5c0fcbe8dbe 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -534,7 +534,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, } static int config_term(struct perf_event_attr *attr, - struct parse_events__term *term) + struct parse_events_term *term) { #define CHECK_TYPE_VAL(type) \ do { \ @@ -579,7 +579,7 @@ do { \ static int config_attr(struct perf_event_attr *attr, struct list_head *head, int fail) { - struct parse_events__term *term; + struct parse_events_term *term; list_for_each_entry(term, head, list) if (config_term(attr, term) && fail) @@ -605,14 +605,14 @@ int parse_events_add_numeric(struct list_head **list, int *idx, return add_event(list, idx, &attr, NULL); } -static int parse_events__is_name_term(struct parse_events__term *term) +static int parse_events__is_name_term(struct parse_events_term *term) { return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; } static char *pmu_event_name(struct list_head *head_terms) { - struct parse_events__term *term; + struct parse_events_term *term; list_for_each_entry(term, head_terms, list) if (parse_events__is_name_term(term)) @@ -1162,16 +1162,16 @@ void print_events(const char *event_glob, bool name_only) print_tracepoint_events(NULL, NULL, name_only); } -int parse_events__is_hardcoded_term(struct parse_events__term *term) +int parse_events__is_hardcoded_term(struct parse_events_term *term) { return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; } -static int new_term(struct parse_events__term **_term, int type_val, +static int new_term(struct parse_events_term **_term, int type_val, int type_term, char *config, char *str, u64 num) { - struct parse_events__term *term; + struct parse_events_term *term; term = zalloc(sizeof(*term)); if (!term) @@ -1197,21 +1197,21 @@ static int new_term(struct parse_events__term **_term, int type_val, return 0; } -int parse_events__term_num(struct parse_events__term **term, +int parse_events_term__num(struct parse_events_term **term, int type_term, char *config, u64 num) { return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, config, NULL, num); } -int parse_events__term_str(struct parse_events__term **term, +int parse_events_term__str(struct parse_events_term **term, int type_term, char *config, char *str) { return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, config, str, 0); } -int parse_events__term_sym_hw(struct parse_events__term **term, +int parse_events_term__sym_hw(struct parse_events_term **term, char *config, unsigned idx) { struct event_symbol *sym; @@ -1229,8 +1229,8 @@ int parse_events__term_sym_hw(struct parse_events__term **term, (char *) "event", (char *) sym->symbol, 0); } -int parse_events__term_clone(struct parse_events__term **new, - struct parse_events__term *term) +int parse_events_term__clone(struct parse_events_term **new, + 
struct parse_events_term *term) { return new_term(new, term->type_val, term->type_term, term->config, term->val.str, term->val.num); @@ -1238,7 +1238,7 @@ int parse_events__term_clone(struct parse_events__term **new, void parse_events__free_terms(struct list_head *terms) { - struct parse_events__term *term, *h; + struct parse_events_term *term, *h; list_for_each_entry_safe(term, h, terms, list) free(term); diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 7c5244fa0885..dd2978e7b8c0 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -50,7 +50,7 @@ enum { PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, }; -struct parse_events__term { +struct parse_events_term { char *config; union { char *str; @@ -70,15 +70,15 @@ struct parse_events_data__terms { struct list_head *terms; }; -int parse_events__is_hardcoded_term(struct parse_events__term *term); -int parse_events__term_num(struct parse_events__term **_term, +int parse_events__is_hardcoded_term(struct parse_events_term *term); +int parse_events_term__num(struct parse_events_term **_term, int type_term, char *config, u64 num); -int parse_events__term_str(struct parse_events__term **_term, +int parse_events_term__str(struct parse_events_term **_term, int type_term, char *config, char *str); -int parse_events__term_sym_hw(struct parse_events__term **term, +int parse_events_term__sym_hw(struct parse_events_term **term, char *config, unsigned idx); -int parse_events__term_clone(struct parse_events__term **new, - struct parse_events__term *term); +int parse_events_term__clone(struct parse_events_term **new, + struct parse_events_term *term); void parse_events__free_terms(struct list_head *terms); int parse_events__modifier_event(struct list_head *list, char *str, bool add); int parse_events__modifier_group(struct list_head *list, char *event_mod); diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 0f9914ae6bac..57ecf9db1f29 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -68,7 +68,7 @@ do { \ char *str; u64 num; struct list_head *head; - struct parse_events__term *term; + struct parse_events_term *term; } %% @@ -315,7 +315,7 @@ event_config: event_config ',' event_term { struct list_head *head = $1; - struct parse_events__term *term = $3; + struct parse_events_term *term = $3; ABORT_ON(!head); list_add_tail(&term->list, head); @@ -325,7 +325,7 @@ event_config ',' event_term event_term { struct list_head *head = malloc(sizeof(*head)); - struct parse_events__term *term = $1; + struct parse_events_term *term = $1; ABORT_ON(!head); INIT_LIST_HEAD(head); @@ -336,70 +336,70 @@ event_term event_term: PE_NAME '=' PE_NAME { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER, + ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3)); $$ = term; } | PE_NAME '=' PE_VALUE { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3)); $$ = term; } | PE_NAME '=' PE_VALUE_SYM_HW { - struct parse_events__term *term; + struct parse_events_term *term; int config = $3 & 255; - ABORT_ON(parse_events__term_sym_hw(&term, $1, config)); + ABORT_ON(parse_events_term__sym_hw(&term, $1, config)); $$ = term; } | PE_NAME { - struct parse_events__term *term; + struct parse_events_term 
*term; - ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, 1)); $$ = term; } | PE_VALUE_SYM_HW { - struct parse_events__term *term; + struct parse_events_term *term; int config = $1 & 255; - ABORT_ON(parse_events__term_sym_hw(&term, NULL, config)); + ABORT_ON(parse_events_term__sym_hw(&term, NULL, config)); $$ = term; } | PE_TERM '=' PE_NAME { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); + ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3)); $$ = term; } | PE_TERM '=' PE_VALUE { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3)); $$ = term; } | PE_TERM { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1)); $$ = term; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index b93ff146766e..c107e24b75f8 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -175,12 +175,12 @@ static int pmu_aliases(char *name, struct list_head *head) static int pmu_alias_terms(struct perf_pmu__alias *alias, struct list_head *terms) { - struct parse_events__term *term, *clone; + struct parse_events_term *term, *clone; LIST_HEAD(list); int ret; list_for_each_entry(term, &alias->terms, list) { - ret = parse_events__term_clone(&clone, term); + ret = parse_events_term__clone(&clone, term); if (ret) { parse_events__free_terms(&list); return ret; @@ -403,7 +403,7 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value) */ static int pmu_config_term(struct list_head *formats, struct perf_event_attr *attr, - struct parse_events__term *term) + struct parse_events_term *term) { struct perf_pmu__format *format; __u64 *vp; @@ -450,7 +450,7 @@ int perf_pmu__config_terms(struct list_head *formats, struct perf_event_attr *attr, struct list_head *head_terms) { - struct parse_events__term *term; + struct parse_events_term *term; list_for_each_entry(term, head_terms, list) if (pmu_config_term(formats, attr, term)) @@ -472,7 +472,7 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, } static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, - struct parse_events__term *term) + struct parse_events_term *term) { struct perf_pmu__alias *alias; char *name; @@ -507,7 +507,7 @@ static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, */ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) { - struct parse_events__term *term, *h; + struct parse_events_term *term, *h; struct perf_pmu__alias *alias; int ret; -- cgit v1.2.3-59-g8ed1b From 5c6ccc3755650497234f9027a0ac9e3b2cf1ba9f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 18 Jan 2013 16:54:00 -0300 Subject: perf pmu: Fix usage of __ in struct names In tools/perf we use a convention where __ separates the struct name from the function name for functions that operate on a struct instance. Fix this usage by removing it from the struct names and fix also the associated functions. 
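As a sketch of the convention (hypothetical declarations, not taken from this patch): single underscores stay inside the type name, and the double underscore separates the type name from the operation performed on it.

    struct perf_pmu_alias;  /* type name: single underscores only */

    /* "method" names: <type name> + "__" + <operation> (hypothetical) */
    struct perf_pmu_alias *perf_pmu_alias__new(const char *name);
    void perf_pmu_alias__delete(struct perf_pmu_alias *alias);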
Acked-by: Jiri Olsa Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-1tepcpohpvfg589pizx7tlkq@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/pmu.c | 20 ++++++++++---------- tools/perf/util/pmu.h | 6 +++--- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index c107e24b75f8..244edbbe1969 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -85,7 +85,7 @@ static int pmu_format(char *name, struct list_head *format) static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) { - struct perf_pmu__alias *alias; + struct perf_pmu_alias *alias; char buf[256]; int ret; @@ -172,7 +172,7 @@ static int pmu_aliases(char *name, struct list_head *head) return 0; } -static int pmu_alias_terms(struct perf_pmu__alias *alias, +static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms) { struct parse_events_term *term, *clone; @@ -360,10 +360,10 @@ struct perf_pmu *perf_pmu__find(char *name) return pmu_lookup(name); } -static struct perf_pmu__format* +static struct perf_pmu_format * pmu_find_format(struct list_head *formats, char *name) { - struct perf_pmu__format *format; + struct perf_pmu_format *format; list_for_each_entry(format, formats, list) if (!strcmp(format->name, name)) @@ -405,7 +405,7 @@ static int pmu_config_term(struct list_head *formats, struct perf_event_attr *attr, struct parse_events_term *term) { - struct perf_pmu__format *format; + struct perf_pmu_format *format; __u64 *vp; /* @@ -471,10 +471,10 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, return perf_pmu__config_terms(&pmu->format, attr, head_terms); } -static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, - struct parse_events_term *term) +static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu, + struct parse_events_term *term) { - struct perf_pmu__alias *alias; + struct perf_pmu_alias *alias; char *name; if (parse_events__is_hardcoded_term(term)) @@ -508,7 +508,7 @@ static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) { struct parse_events_term *term, *h; - struct perf_pmu__alias *alias; + struct perf_pmu_alias *alias; int ret; list_for_each_entry_safe(term, h, head_terms, list) { @@ -527,7 +527,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits) { - struct perf_pmu__format *format; + struct perf_pmu_format *format; format = zalloc(sizeof(*format)); if (!format) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index a313ed76a49a..e881dd180acb 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -12,14 +12,14 @@ enum { #define PERF_PMU_FORMAT_BITS 64 -struct perf_pmu__format { +struct perf_pmu_format { char *name; int value; DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); struct list_head list; }; -struct perf_pmu__alias { +struct perf_pmu_alias { char *name; struct list_head terms; struct list_head list; @@ -42,7 +42,7 @@ int perf_pmu__config_terms(struct list_head *formats, struct list_head *head_terms); int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); struct list_head *perf_pmu__alias(struct perf_pmu *pmu, - struct list_head *head_terms); + 
struct list_head *head_terms); int perf_pmu_wrap(void); void perf_pmu_error(struct list_head *list, char *name, char const *msg); -- cgit v1.2.3-59-g8ed1b From 7c3102b843a581b4b84643a18d423f8807364ca0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 18 Jan 2013 16:55:52 -0300 Subject: perf ui browsers: Fix usage of __ in struct names In tools/perf we use a convention where __ separates the struct name from the function name for functions that operate on a struct instance. Fix this usage by removing it from the struct names and fix also the associated functions. Acked-by: Jiri Olsa Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-rfj7acng5tukftb8hy1rrw08@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browser.c | 4 ++-- tools/perf/ui/browsers/annotate.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 4aeb7d5df939..588bcb2d008b 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -471,7 +471,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *browser) return row; } -static struct ui_browser__colorset { +static struct ui_browser_colorset { const char *name, *fg, *bg; int colorset; } ui_browser__colorsets[] = { @@ -706,7 +706,7 @@ void ui_browser__init(void) perf_config(ui_browser__color_config, NULL); while (ui_browser__colorsets[i].name) { - struct ui_browser__colorset *c = &ui_browser__colorsets[i++]; + struct ui_browser_colorset *c = &ui_browser__colorsets[i++]; sltt_set_color(c->colorset, c->name, c->fg, c->bg); } diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 2fc7f04691cf..7dca1555c610 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -926,7 +926,7 @@ out_free_offsets: /* * Keep the entries sorted, they are bsearch'ed */ -static struct annotate__config { +static struct annotate_config { const char *name; bool *value; } annotate__configs[] = { @@ -940,7 +940,7 @@ static struct annotate__config { static int annotate_config__cmp(const void *name, const void *cfgp) { - const struct annotate__config *cfg = cfgp; + const struct annotate_config *cfg = cfgp; return strcmp(name, cfg->name); } @@ -948,7 +948,7 @@ static int annotate_config__cmp(const void *name, const void *cfgp) static int annotate__config(const char *var, const char *value, void *data __maybe_unused) { - struct annotate__config *cfg; + struct annotate_config *cfg; const char *name; if (prefixcmp(var, "annotate.") != 0) @@ -956,7 +956,7 @@ static int annotate__config(const char *var, const char *value, name = var + 9; cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs), - sizeof(struct annotate__config), annotate_config__cmp); + sizeof(struct annotate_config), annotate_config__cmp); if (cfg == NULL) return -1; -- cgit v1.2.3-59-g8ed1b From 23b6339b914ec009ba206584a410039b589243aa Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 18 Jan 2013 16:56:57 -0300 Subject: perf tools: Fix usage of __ in event parsing struct names In tools/perf we use a convention where __ separates the struct name from the function name for functions that operate on a struct instance. Fix this usage by removing it from the struct names and fix also the associated functions. 
Acked-by: Jiri Olsa Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-kdcoh7uitivx68otqcz12aaz@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 24 ++++++++++++------------ tools/perf/util/parse-events.c | 4 ++-- tools/perf/util/parse-events.h | 4 ++-- tools/perf/util/parse-events.y | 26 +++++++++++++------------- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 9ac45c58305e..20acaff295d2 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -840,13 +840,13 @@ static int test__all_tracepoints(struct perf_evlist *evlist) return test__checkevent_tracepoint_multi(evlist); } -struct test__event_st { +struct evlist_test { const char *name; __u32 type; int (*check)(struct perf_evlist *evlist); }; -static struct test__event_st test__events[] = { +static struct evlist_test test__events[] = { [0] = { .name = "syscalls:sys_enter_open", .check = test__checkevent_tracepoint, @@ -985,7 +985,7 @@ static struct test__event_st test__events[] = { }, }; -static struct test__event_st test__events_pmu[] = { +static struct evlist_test test__events_pmu[] = { [0] = { .name = "cpu/config=10,config1,config2=3,period=1000/u", .check = test__checkevent_pmu, @@ -996,20 +996,20 @@ static struct test__event_st test__events_pmu[] = { }, }; -struct test__term { +struct terms_test { const char *str; __u32 type; int (*check)(struct list_head *terms); }; -static struct test__term test__terms[] = { +static struct terms_test test__terms[] = { [0] = { .str = "config=10,config1,config2=3,umask=1", .check = test__checkterms_simple, }, }; -static int test_event(struct test__event_st *e) +static int test_event(struct evlist_test *e) { struct perf_evlist *evlist; int ret; @@ -1031,13 +1031,13 @@ static int test_event(struct test__event_st *e) return ret; } -static int test_events(struct test__event_st *events, unsigned cnt) +static int test_events(struct evlist_test *events, unsigned cnt) { int ret1, ret2 = 0; unsigned i; for (i = 0; i < cnt; i++) { - struct test__event_st *e = &events[i]; + struct evlist_test *e = &events[i]; pr_debug("running test %d '%s'\n", i, e->name); ret1 = test_event(e); @@ -1048,7 +1048,7 @@ static int test_events(struct test__event_st *events, unsigned cnt) return ret2; } -static int test_term(struct test__term *t) +static int test_term(struct terms_test *t) { struct list_head *terms; int ret; @@ -1072,13 +1072,13 @@ static int test_term(struct test__term *t) return ret; } -static int test_terms(struct test__term *terms, unsigned cnt) +static int test_terms(struct terms_test *terms, unsigned cnt) { int ret = 0; unsigned i; for (i = 0; i < cnt; i++) { - struct test__term *t = &terms[i]; + struct terms_test *t = &terms[i]; pr_debug("running test %d '%s'\n", i, t->str); ret = test_term(t); @@ -1129,7 +1129,7 @@ static int test_pmu_events(void) while (!ret && (ent = readdir(dir))) { #define MAX_NAME 100 - struct test__event_st e; + struct evlist_test e; char name[MAX_NAME]; if (!strcmp(ent->d_name, ".") || diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index e5c0fcbe8dbe..02f6421f03a0 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -856,7 +856,7 @@ static int parse_events__scanner(const char *str, void *data, int start_token) */ int 
parse_events_terms(struct list_head *terms, const char *str) { - struct parse_events_data__terms data = { + struct parse_events_terms data = { .terms = NULL, }; int ret; @@ -874,7 +874,7 @@ int parse_events_terms(struct list_head *terms, const char *str) int parse_events(struct perf_evlist *evlist, const char *str) { - struct parse_events_data__events data = { + struct parse_events_evlist data = { .list = LIST_HEAD_INIT(data.list), .idx = evlist->nr_entries, }; diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index dd2978e7b8c0..2cd2c42a69c5 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -61,12 +61,12 @@ struct parse_events_term { struct list_head list; }; -struct parse_events_data__events { +struct parse_events_evlist { struct list_head list; int idx; }; -struct parse_events_data__terms { +struct parse_events_terms { struct list_head *terms; }; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 57ecf9db1f29..9d43c86176ff 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -79,7 +79,7 @@ PE_START_TERMS start_terms start_events: groups { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; parse_events_update_lists($1, &data->list); } @@ -186,7 +186,7 @@ event_def: event_pmu | event_pmu: PE_NAME '/' event_config '/' { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); @@ -202,7 +202,7 @@ PE_VALUE_SYM_SW event_legacy_symbol: value_sym '/' event_config '/' { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; int type = $1 >> 16; int config = $1 & 255; @@ -215,7 +215,7 @@ value_sym '/' event_config '/' | value_sym sep_slash_dc { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; int type = $1 >> 16; int config = $1 & 255; @@ -228,7 +228,7 @@ value_sym sep_slash_dc event_legacy_cache: PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); @@ -237,7 +237,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); @@ -246,7 +246,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT | PE_NAME_CACHE_TYPE { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); @@ -256,7 +256,7 @@ PE_NAME_CACHE_TYPE event_legacy_mem: PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, @@ -266,7 +266,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc | PE_PREFIX_MEM PE_VALUE sep_dc { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head 
*list = NULL; ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, @@ -277,7 +277,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc event_legacy_tracepoint: PE_NAME ':' PE_NAME { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); @@ -287,7 +287,7 @@ PE_NAME ':' PE_NAME event_legacy_numeric: PE_VALUE ':' PE_VALUE { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); @@ -297,7 +297,7 @@ PE_VALUE ':' PE_VALUE event_legacy_raw: PE_RAW { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; struct list_head *list = NULL; ABORT_ON(parse_events_add_numeric(&list, &data->idx, @@ -307,7 +307,7 @@ PE_RAW start_terms: event_config { - struct parse_events_data__terms *data = _data; + struct parse_events_terms *data = _data; data->terms = $1; } -- cgit v1.2.3-59-g8ed1b From be651ed98dd362fa4151f49f917c74e9e8da88ac Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 18 Jan 2013 17:03:43 -0300 Subject: perf tests: Use ARRAY_SIZE() where applicable We were using a homebrew equivalent; use the macro that is used everywhere for this purpose. Acked-by: Jiri Olsa Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-bp3wokafua1ecairau77jcy0@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/pmu.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 6803ed1a06dd..12b322fa3475 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -19,8 +19,6 @@ static struct test_format { { "krava23", "config2:28-29,38\n", }, }; -#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format)) - /* Simulated users input. */ static struct parse_events_term test_terms[] = { { @@ -78,7 +76,6 @@ static struct parse_events_term test_terms[] = { .type_term = PARSE_EVENTS__TERM_TYPE_USER, }, }; -#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events_term)) /* * Prepare format directory data, exported by kernel @@ -93,7 +90,7 @@ static char *test_format_dir_get(void) if (!mkdtemp(dir)) return NULL; - for (i = 0; i < TEST_FORMATS_CNT; i++) { + for (i = 0; i < ARRAY_SIZE(test_formats); i++) { static char name[PATH_MAX]; struct test_format *format = &test_formats[i]; FILE *file; @@ -130,14 +127,12 @@ static struct list_head *test_terms_list(void) static LIST_HEAD(terms); unsigned int i; - for (i = 0; i < TERMS_CNT; i++) + for (i = 0; i < ARRAY_SIZE(test_terms); i++) list_add_tail(&test_terms[i].list, &terms); return &terms; } -#undef TERMS_CNT - int test__pmu(void) { char *format = test_format_dir_get(); -- cgit v1.2.3-59-g8ed1b From ab1bf653220b37927b838df81042c8355d20bc49 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 18 Jan 2013 17:05:09 -0300 Subject: perf pmu: Privatize perf_pmu_{format,alias} structs They are only used in pmu.c, so no need to make them public in pmu.h.
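A minimal sketch of the pattern (illustrative, with the real field lists elided): the header keeps only a forward declaration, so other files can hold and pass pointers while the layout stays visible to pmu.c alone, and any outside dereference fails to compile.

    /* pmu.h -- forward declaration only, layout hidden */
    struct perf_pmu_format;

    /* pmu.c -- full definition, private to this translation unit */
    struct perf_pmu_format {
            char *name;
            int value;
            /* ... */
    };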
Acked-by: Jiri Olsa Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-3gu6vhyro22ywqcldy0gtegv@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/pmu.c | 14 +++++++++++++- tools/perf/util/pmu.h | 13 ------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 244edbbe1969..4c6f9c490a8d 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1,4 +1,3 @@ - #include #include #include @@ -11,6 +10,19 @@ #include "parse-events.h" #include "cpumap.h" +struct perf_pmu_alias { + char *name; + struct list_head terms; + struct list_head list; +}; + +struct perf_pmu_format { + char *name; + int value; + DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); + struct list_head list; +}; + #define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" int perf_pmu_parse(struct list_head *list, char *name); diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index e881dd180acb..32fe55b659fa 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -12,19 +12,6 @@ enum { #define PERF_PMU_FORMAT_BITS 64 -struct perf_pmu_format { - char *name; - int value; - DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); - struct list_head list; -}; - -struct perf_pmu_alias { - char *name; - struct list_head terms; - struct list_head list; -}; - struct perf_pmu { char *name; __u32 type; -- cgit v1.2.3-59-g8ed1b From 2a16bf8c1333aa5107ce6474c4df9988d62b41d3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 15:18:54 -0300 Subject: perf tools: Remove some needless die() calls from the main routine Some would just call exit() anyway right after calling die() and the main routine doesn't have to call it, just return the exit value. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-nzq0sdur6oq6lgkt2ipf4o8s@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/perf.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 0f661fbce6a8..095b88207cd3 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -328,14 +328,23 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) return 0; + status = 1; /* Check for ENOSPC and EIO errors.. 
*/ - if (fflush(stdout)) - die("write failure on standard output: %s", strerror(errno)); - if (ferror(stdout)) - die("unknown write failure on standard output"); - if (fclose(stdout)) - die("close failed on standard output: %s", strerror(errno)); - return 0; + if (fflush(stdout)) { + fprintf(stderr, "write failure on standard output: %s", strerror(errno)); + goto out; + } + if (ferror(stdout)) { + fprintf(stderr, "unknown write failure on standard output"); + goto out; + } + if (fclose(stdout)) { + fprintf(stderr, "close failed on standard output: %s", strerror(errno)); + goto out; + } + status = 0; +out: + return status; } static void handle_internal_command(int argc, const char **argv) @@ -467,7 +476,8 @@ int main(int argc, const char **argv) cmd += 5; argv[0] = cmd; handle_internal_command(argc, argv); - die("cannot handle %s internally", cmd); + fprintf(stderr, "cannot handle %s internally", cmd); + goto out; } /* Look for flags.. */ @@ -485,7 +495,7 @@ int main(int argc, const char **argv) printf("\n usage: %s\n\n", perf_usage_string); list_common_cmds_help(); printf("\n %s\n\n", perf_more_info_string); - exit(1); + goto out; } cmd = argv[0]; @@ -517,7 +527,7 @@ int main(int argc, const char **argv) fprintf(stderr, "Expansion of alias '%s' failed; " "'%s' is not a perf-command\n", cmd, argv[0]); - exit(1); + goto out; } if (!done_help) { cmd = argv[0] = help_unknown_cmd(cmd); @@ -528,6 +538,6 @@ int main(int argc, const char **argv) fprintf(stderr, "Failed to run command '%s': %s\n", cmd, strerror(errno)); - +out: return 1; } -- cgit v1.2.3-59-g8ed1b From 10ee9fa3b92dece36209a3786d7bd558d459635f Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 18 Jan 2013 13:51:25 -0600 Subject: perf tools: Reinstate 'signed' field flag for tracepoints For some reason the libtraceevent tracepoint-parsing code is missing the FIELD_IS_SIGNED flag-setting code, which causes problems for the Perl trace event binding at least, since it ends up unable to recognize negative numbers. Things like checking for negative return values therefore fail, causing scripts like rwtop to instead interpret the negative return value as a large positive value, which in turn gets added to e.g. read totals with insanely invalid results. So set the FIELD_IS_SIGNED flag for tracepoint events that specify "signed:1".
Before: # perf script record rw-by-pid # perf script report rw-by-pid read counts by pid: pid comm # reads bytes_requested bytes_read ------ -------------------- ----------- ---------- ---------- 753 Xorg 88 512000 7.74763251095801e+20 1619 firefox 42 462 2.58254417031934e+20 1232 gnome-shell 11 176 1.10680464442257e+20 1471 gnome-terminal 3 16366 18446744073709551615 1408 libsocialweb-co 2 32 18446744073709551613 After: # perf script report rw-by-pid read counts by pid: pid comm # reads bytes_requested bytes_read ------ -------------------- ----------- ---------- ---------- 753 Xorg 88 512000 2764 1619 firefox 42 462 126 1232 gnome-shell 11 176 40 1471 gnome-terminal 3 16366 10 1408 libsocialweb-co 2 32 8 Signed-off-by: Tom Zanussi Link: http://lkml.kernel.org/r/1471b5968821a455cf5168bb4567964e74ecf530.1358527965.git.tom.zanussi@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/event-parse.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index f5046191d086..bb8b3db0e583 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1462,7 +1462,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f if (read_expect_type(EVENT_ITEM, &token)) goto fail; - /* add signed type */ + if (strtoul(token, NULL, 0)) + field->flags |= FIELD_IS_SIGNED; free_token(token); if (read_expected(EVENT_OP, ";") < 0) -- cgit v1.2.3-59-g8ed1b From bdb71db29d50b61f45459b8facfb876768e8cacc Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 18 Jan 2013 13:51:26 -0600 Subject: perf script: Don't display trace info when invoking scripts Only display the trace info if using the default event display. When invoking scripts we assume they have complete control of what's displayed so we shouldn't unconditionally display the trace info, and when generating scripts we don't expect to see trace info obscuring the output message. Signed-off-by: Tom Zanussi Link: http://lkml.kernel.org/r/12ec084ef2870178915c907d16cd1dfa19fbb39e.1358527965.git.tom.zanussi@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index fee4c9a8aee5..92d4658f56fb 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1475,7 +1475,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) return -1; } - perf_session__fprintf_info(session, stdout, show_full_info); + if (!script_name && !generate_script_lang) + perf_session__fprintf_info(session, stdout, show_full_info); if (!no_callchain) symbol_conf.use_callchain = true; -- cgit v1.2.3-59-g8ed1b From 2de9533d6d61d3086a7079bf142d2bfa374e664e Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 18 Jan 2013 13:51:27 -0600 Subject: perf script: hook up perf_scripting_context->pevent Running the check-perf-trace scripts causes segfaults in both the Perl and Python cases: # perf script record check-perf-trace # perf script -s libexec/perf-core/scripts/python/check-perf-trace.py trace_begin Segmentation fault (core dumped) The reason is that the 'pevent' field was added to perf_scripting_context but it wasn't hooked up with an actual pevent in either case, so when one of the 'common' fields is accessed (in util/trace-event-parse.c:get_common_fields()), pevent->events tries to dereference a NULL pointer. 
This sets the pevent field when the scripting context is set up. Signed-off-by: Tom Zanussi Link: http://lkml.kernel.org/r/d2b1b8166a6ca0a36e1f5255b88a8289058ba236.1358527965.git.tom.zanussi@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/scripting-engines/trace-event-perl.c | 1 + tools/perf/util/scripting-engines/trace-event-python.c | 1 + 2 files changed, 2 insertions(+) diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index f80605eb1855..eacec859f299 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -292,6 +292,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; + scripting_context->pevent = evsel->tp_format->pevent; ENTER; SAVETMPS; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 14683dfca2ee..e87aa5d9696b 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -265,6 +265,7 @@ static void python_process_tracepoint(union perf_event *perf_event ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; + scripting_context->pevent = evsel->tp_format->pevent; context = PyCObject_FromVoidPtr(scripting_context, NULL); -- cgit v1.2.3-59-g8ed1b From 1de7b7e89d16e3daf32fb3b6f214d038ab2ed879 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 18 Jan 2013 13:51:28 -0600 Subject: perf script: Remove workqueue-stats script The tracepoints used by the workqueue-stats script no longer exist so trying to run the script results in: # perf script record workqueue-stats invalid or unsupported event: 'workqueue:workqueue_creation' Run 'perf list' for a list of valid events So remove the script until it can be reworked using the new workqueue tracepoints. 
Signed-off-by: Tom Zanussi Link: http://lkml.kernel.org/r/e7a7637d5df9df86887c3bff7683574665ec5360.1358527965.git.tom.zanussi@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-script-python.txt | 2 - tools/perf/scripts/perl/bin/workqueue-stats-record | 2 - tools/perf/scripts/perl/bin/workqueue-stats-report | 3 - tools/perf/scripts/perl/workqueue-stats.pl | 129 --------------------- 4 files changed, 136 deletions(-) delete mode 100644 tools/perf/scripts/perl/bin/workqueue-stats-record delete mode 100644 tools/perf/scripts/perl/bin/workqueue-stats-report delete mode 100644 tools/perf/scripts/perl/workqueue-stats.pl diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt index a4027f221a53..9f1f054b8432 100644 --- a/tools/perf/Documentation/perf-script-python.txt +++ b/tools/perf/Documentation/perf-script-python.txt @@ -336,7 +336,6 @@ scripts listed by the 'perf script -l' command e.g.: ---- root@tropicana:~# perf script -l List of available trace scripts: - workqueue-stats workqueue stats (ins/exe/create/destroy) wakeup-latency system-wide min/max/avg wakeup latency rw-by-file r/w activity for a program, by file rw-by-pid system-wide r/w activity @@ -402,7 +401,6 @@ should show a new entry for your script: ---- root@tropicana:~# perf script -l List of available trace scripts: - workqueue-stats workqueue stats (ins/exe/create/destroy) wakeup-latency system-wide min/max/avg wakeup latency rw-by-file r/w activity for a program, by file rw-by-pid system-wide r/w activity diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record deleted file mode 100644 index 8edda9078d5d..000000000000 --- a/tools/perf/scripts/perl/bin/workqueue-stats-record +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@ diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report deleted file mode 100644 index 6d91411d248c..000000000000 --- a/tools/perf/scripts/perl/bin/workqueue-stats-report +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -# description: workqueue stats (ins/exe/create/destroy) -perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl deleted file mode 100644 index a8eaff5119e0..000000000000 --- a/tools/perf/scripts/perl/workqueue-stats.pl +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/perl -w -# (c) 2009, Tom Zanussi -# Licensed under the terms of the GNU GPL License version 2 - -# Displays workqueue stats -# -# Usage: -# -# perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e -# workqueue:workqueue_destruction -e workqueue:workqueue_execution -# -e workqueue:workqueue_insertion -# -# perf script -p -s tools/perf/scripts/perl/workqueue-stats.pl - -use 5.010000; -use strict; -use warnings; - -use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; -use lib "./Perf-Trace-Util/lib"; -use Perf::Trace::Core; -use Perf::Trace::Util; - -my @cpus; - -sub workqueue::workqueue_destruction -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid) = @_; - - $cpus[$common_cpu]{$thread_pid}{destroyed}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub 
workqueue::workqueue_creation -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid, $cpu) = @_; - - $cpus[$common_cpu]{$thread_pid}{created}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub workqueue::workqueue_execution -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid, $func) = @_; - - $cpus[$common_cpu]{$thread_pid}{executed}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub workqueue::workqueue_insertion -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid, $func) = @_; - - $cpus[$common_cpu]{$thread_pid}{inserted}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub trace_end -{ - print "workqueue work stats:\n\n"; - my $cpu = 0; - printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name"); - printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----"); - foreach my $pidhash (@cpus) { - while ((my $pid, my $wqhash) = each %$pidhash) { - my $ins = $$wqhash{'inserted'} || 0; - my $exe = $$wqhash{'executed'} || 0; - my $comm = $$wqhash{'comm'} || ""; - if ($ins || $exe) { - printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm); - } - } - $cpu++; - } - - $cpu = 0; - print "\nworkqueue lifecycle stats:\n\n"; - printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name"); - printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----"); - foreach my $pidhash (@cpus) { - while ((my $pid, my $wqhash) = each %$pidhash) { - my $created = $$wqhash{'created'} || 0; - my $destroyed = $$wqhash{'destroyed'} || 0; - my $comm = $$wqhash{'comm'} || ""; - if ($created || $destroyed) { - printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed, - $comm); - } - } - $cpu++; - } - - print_unhandled(); -} - -my %unhandled; - -sub print_unhandled -{ - if ((scalar keys %unhandled) == 0) { - return; - } - - print "\nunhandled events:\n\n"; - - printf("%-40s %10s\n", "event", "count"); - printf("%-40s %10s\n", "----------------------------------------", - "-----------"); - - foreach my $event_name (keys %unhandled) { - printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); - } -} - -sub trace_unhandled -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm) = @_; - - $unhandled{$event_name}++; -} -- cgit v1.2.3-59-g8ed1b From 5a3d04d6dc9050b4b4562f5c66aea23f0aa1c003 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 16:10:42 -0300 Subject: perf tools: Allow passing NULL to intlist__find So that we can work with optional parameters that may not set up an intlist. 
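The intended calling pattern, as a minimal sketch (hypothetical caller: skip_str and i stand in for whatever option parsing provides, and the string-parsing intlist__new() only arrives with the next patch in this series):

	struct intlist *skiplist = NULL;	/* optional, possibly never set up */

	if (skip_str != NULL)
		skiplist = intlist__new(skip_str);

	/* Safe even when skiplist is NULL: intlist__find() now just returns NULL */
	if (intlist__find(skiplist, i))
		continue;	/* entry i was asked to be skipped */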
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-e9tmvgdzehqrza11zs0nbg7g@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/intlist.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c index 9d0740024ba8..cbf0c32ce403 100644 --- a/tools/perf/util/intlist.c +++ b/tools/perf/util/intlist.c @@ -59,9 +59,14 @@ void intlist__remove(struct intlist *ilist, struct int_node *node) struct int_node *intlist__find(struct intlist *ilist, int i) { - struct int_node *node = NULL; - struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); + struct int_node *node; + struct rb_node *rb_node; + + if (ilist == NULL) + return NULL; + node = NULL; + rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); if (rb_node) node = container_of(rb_node, struct int_node, rb_node); -- cgit v1.2.3-59-g8ed1b From ffe0fb769a6db3b6027d9228b6fecb6b352e4834 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 16:17:27 -0300 Subject: perf tools: Allow passing a list to intlist__new Just like strlist allows passing a list of entries to parse. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-em50vqvvmlnc6k9tw4xtixus@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 2 +- tools/perf/util/intlist.c | 27 ++++++++++++++++++++++++++- tools/perf/util/intlist.h | 2 +- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index e05ba817f8ce..7978c8117b7f 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -694,7 +694,7 @@ static void perf_event__process_sample(struct perf_tool *tool, static struct intlist *seen; if (!seen) - seen = intlist__new(); + seen = intlist__new(NULL); if (!intlist__has_entry(seen, event->ip.pid)) { pr_err("Can't find guest [%d]'s kernel information\n", diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c index cbf0c32ce403..11a8d86f7fea 100644 --- a/tools/perf/util/intlist.c +++ b/tools/perf/util/intlist.c @@ -73,7 +73,26 @@ struct int_node *intlist__find(struct intlist *ilist, int i) return node; } -struct intlist *intlist__new(void) +static int intlist__parse_list(struct intlist *ilist, const char *s) +{ + char *sep; + int err; + + do { + long value = strtol(s, &sep, 10); + err = -EINVAL; + if (*sep != ',' && *sep != '\0') + break; + err = intlist__add(ilist, value); + if (err) + break; + s = sep + 1; + } while (*sep != '\0'); + + return err; +} + +struct intlist *intlist__new(const char *slist) { struct intlist *ilist = malloc(sizeof(*ilist)); @@ -82,9 +101,15 @@ struct intlist *intlist__new(void) ilist->rblist.node_cmp = intlist__node_cmp; ilist->rblist.node_new = intlist__node_new; ilist->rblist.node_delete = intlist__node_delete; + + if (slist && intlist__parse_list(ilist, slist)) + goto out_delete; } return ilist; +out_delete: + intlist__delete(ilist); + return NULL; } void intlist__delete(struct intlist *ilist) diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h index 6d63ab90db50..62351dad848f 100644 --- a/tools/perf/util/intlist.h +++ b/tools/perf/util/intlist.h @@ -15,7 +15,7 @@ struct intlist { struct rblist rblist; }; -struct intlist 
*intlist__new(void); +struct intlist *intlist__new(const char *slist); void intlist__delete(struct intlist *ilist); void intlist__remove(struct intlist *ilist, struct int_node *in); -- cgit v1.2.3-59-g8ed1b From 2ae828786c65ab8f587647bd0f22f8fe00f1f238 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 16:22:55 -0300 Subject: perf test: Allow skipping tests Sometimes a test is problematic for some reason and one wants to skip it, for instance: [root@sandy ~]# perf test 1: vmlinux symtab matches kallsyms : Ok 2: detect open syscall event : Ok 3: detect open syscall event on all cpus : Ok 4: read samples using the mmap interface : Ok 5: parse events tests : Warning: bad op token { Warning: bad op token { Warning: bad op token { Warning: bad op token { Warning: bad op token { Warning: function is_writable_pte not defined Segmentation fault (core dumped) So now we can use -s/--skip while the problematic tests are being fixed, allowing us to test all the other entries: [root@sandy ~]# perf test -s 5 1: vmlinux symtab matches kallsyms : Ok 2: detect open syscall event : Ok 3: detect open syscall event on all cpus : Ok 4: read samples using the mmap interface : Ok 5: parse events tests : Skip (user override) 6: x86 rdpmc test : Ok 7: Validate PERF_RECORD_* events & perf_sample fields : Ok 8: Test perf pmu format parsing : Ok 9: Test dso data interface : Ok 10: roundtrip evsel->name check : Ok 11: Check parsing of sched tracepoints fields : Ok 12: Generate and check syscalls:sys_enter_open event fields: Ok 13: struct perf_event_attr setup : Ok 14: Test matching and linking mutliple hists : Ok 15: Try 'use perf' in python, checking link problems : Ok [root@sandy ~]# Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-klzd8p57jzdryafqkmlppcb1@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-test.txt | 4 ++++ tools/perf/tests/builtin-test.c | 17 +++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt index b24ac40fcd58..d1d3e5121f89 100644 --- a/tools/perf/Documentation/perf-test.txt +++ b/tools/perf/Documentation/perf-test.txt @@ -23,6 +23,10 @@ from 'perf test list'. OPTIONS ------- +-s:: +--skip:: + Tests to skip (comma separater numeric list). + -v:: --verbose:: Be more verbose. 
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 6a5dee2377b0..acb98e0e39f2 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -4,6 +4,7 @@ * Builtin regression testing command: ever growing number of sanity tests */ #include "builtin.h" +#include "intlist.h" #include "tests.h" #include "debug.h" #include "color.h" @@ -105,7 +106,7 @@ static bool perf_test__matches(int curr, int argc, const char *argv[]) return false; } -static int __cmd_test(int argc, const char *argv[]) +static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) { int i = 0; int width = 0; @@ -126,6 +127,12 @@ static int __cmd_test(int argc, const char *argv[]) continue; pr_info("%2d: %-*s:", i, width, tests[curr].desc); + + if (intlist__find(skiplist, i)) { + color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); + continue; + } + pr_debug("\n--- start ---\n"); err = tests[curr].func(); pr_debug("---- end ----\n%s:", tests[curr].desc); @@ -169,11 +176,14 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) "perf test [] [{list |[|]}]", NULL, }; + const char *skip = NULL; const struct option test_options[] = { + OPT_STRING('s', "skip", &skip, "tests", "tests to skip"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_END() }; + struct intlist *skiplist = NULL; argc = parse_options(argc, argv, test_options, test_usage, 0); if (argc >= 1 && !strcmp(argv[0], "list")) @@ -186,5 +196,8 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) if (symbol__init() < 0) return -1; - return __cmd_test(argc, argv); + if (skip != NULL) + skiplist = intlist__new(skip); + + return __cmd_test(argc, argv, skiplist); } -- cgit v1.2.3-59-g8ed1b From b736f48bda54ec75b7dc9306884c3843f1a78a0a Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Sun, 18 Nov 2012 21:27:45 -0800 Subject: tracing: Mark tracing_dentry_percpu() static Nothing outside of kernel/trace/trace.c references tracing_dentry_percpu(). Link: http://lkml.kernel.org/r/1353302917-13995-7-git-send-email-josh@joshtriplett.org Signed-off-by: Josh Triplett Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d2a658349ca1..ca9b7dfed8ef 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4506,7 +4506,7 @@ struct dentry *tracing_init_dentry(void) static struct dentry *d_percpu; -struct dentry *tracing_dentry_percpu(void) +static struct dentry *tracing_dentry_percpu(void) { static int once; struct dentry *d_tracer; -- cgit v1.2.3-59-g8ed1b From e23c1a5578cf32ed3a7ac9dde59a2de0a52ff812 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 24 Jan 2013 21:46:43 +0100 Subject: tools lib traceevent: Handle dynamic array's element size properly Fixing the dynamic array format field parsing. Currently the event_read_fields function could segfault while parsing a dynamic array other than the string type. The reason is that event->pevent is not guaranteed to be set, yet it gets dereferenced unconditionally. Also adding proper initialization of field->elementsize based on the parsed dynamic type.
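For context, a dynamic array appears in a tracepoint format file as a __data_loc field, e.g. (an illustrative line, not from any particular event):

	field:__data_loc u32[] values;	offset:8;	size:4;	signed:0;

With this fix the parser feeds the element type name ("u32") through the new type_size() table and sets field->elementsize to 4, instead of unconditionally falling back to event->pevent->long_size.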
Signed-off-by: Jiri Olsa Acked-by: Steven Rostedt Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1359060403-32422-1-git-send-email-jolsa@redhat.com [ committer note: Made a char pointer parameter const, as requested by Steven ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/event-parse.c | 39 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index bb8b3db0e583..82b0606dcb8a 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1223,6 +1223,34 @@ static int field_is_long(struct format_field *field) return 0; } +static unsigned int type_size(const char *name) +{ + /* This covers all FIELD_IS_STRING types. */ + static struct { + const char *type; + unsigned int size; + } table[] = { + { "u8", 1 }, + { "u16", 2 }, + { "u32", 4 }, + { "u64", 8 }, + { "s8", 1 }, + { "s16", 2 }, + { "s32", 4 }, + { "s64", 8 }, + { "char", 1 }, + { }, + }; + int i; + + for (i = 0; table[i].type; i++) { + if (!strcmp(table[i].type, name)) + return table[i].size; + } + + return 0; +} + static int event_read_fields(struct event_format *event, struct format_field **fields) { struct format_field *field = NULL; @@ -1232,6 +1260,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f int count = 0; do { + unsigned int size_dynamic = 0; + type = read_token(&token); if (type == EVENT_NEWLINE) { free_token(token); @@ -1390,6 +1420,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f field->type = new_type; strcat(field->type, " "); strcat(field->type, field->name); + size_dynamic = type_size(field->name); free_token(field->name); strcat(field->type, brackets); field->name = token; @@ -1478,10 +1509,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f if (field->flags & FIELD_IS_ARRAY) { if (field->arraylen) field->elementsize = field->size / field->arraylen; + else if (field->flags & FIELD_IS_DYNAMIC) + field->elementsize = size_dynamic; else if (field->flags & FIELD_IS_STRING) field->elementsize = 1; - else - field->elementsize = event->pevent->long_size; + else if (field->flags & FIELD_IS_LONG) + field->elementsize = event->pevent ? + event->pevent->long_size : + sizeof(long); } else field->elementsize = field->size; -- cgit v1.2.3-59-g8ed1b From d8639f068a59c842882339173f58311a583c555f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 21:59:59 -0300 Subject: perf tools: Stop using 'self' in strlist As suggested by tglx, 'self' should be replaced by something that is more useful. 
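The renamed API is otherwise unchanged; typical usage, sketched here with made-up values:

	struct strlist *slist = strlist__new(true, "foo,bar,file:///tmp/entries");
	struct str_node *pos;

	if (slist == NULL)
		return -1;

	strlist__for_each(pos, slist)
		printf("%s\n", pos->s);

	strlist__delete(slist);

Entries prefixed with "file://" are loaded from the named file, one entry per line, as strlist__parse_list_entry() in the diff below shows.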
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-933537sxtcz47qs0e0ledmrp@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/strlist.c | 54 +++++++++++++++++++++++------------------------ tools/perf/util/strlist.h | 42 ++++++++++++++++++------------------ 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c index 155d8b7078a7..55433aa42c8f 100644 --- a/tools/perf/util/strlist.c +++ b/tools/perf/util/strlist.c @@ -35,11 +35,11 @@ out_delete: return NULL; } -static void str_node__delete(struct str_node *self, bool dupstr) +static void str_node__delete(struct str_node *snode, bool dupstr) { if (dupstr) - free((void *)self->s); - free(self); + free((void *)snode->s); + free(snode); } static @@ -59,12 +59,12 @@ static int strlist__node_cmp(struct rb_node *rb_node, const void *entry) return strcmp(snode->s, str); } -int strlist__add(struct strlist *self, const char *new_entry) +int strlist__add(struct strlist *slist, const char *new_entry) { - return rblist__add_node(&self->rblist, new_entry); + return rblist__add_node(&slist->rblist, new_entry); } -int strlist__load(struct strlist *self, const char *filename) +int strlist__load(struct strlist *slist, const char *filename) { char entry[1024]; int err; @@ -80,7 +80,7 @@ int strlist__load(struct strlist *self, const char *filename) continue; entry[len - 1] = '\0'; - err = strlist__add(self, entry); + err = strlist__add(slist, entry); if (err != 0) goto out; } @@ -107,56 +107,56 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry) return snode; } -static int strlist__parse_list_entry(struct strlist *self, const char *s) +static int strlist__parse_list_entry(struct strlist *slist, const char *s) { if (strncmp(s, "file://", 7) == 0) - return strlist__load(self, s + 7); + return strlist__load(slist, s + 7); - return strlist__add(self, s); + return strlist__add(slist, s); } -int strlist__parse_list(struct strlist *self, const char *s) +int strlist__parse_list(struct strlist *slist, const char *s) { char *sep; int err; while ((sep = strchr(s, ',')) != NULL) { *sep = '\0'; - err = strlist__parse_list_entry(self, s); + err = strlist__parse_list_entry(slist, s); *sep = ','; if (err != 0) return err; s = sep + 1; } - return *s ? strlist__parse_list_entry(self, s) : 0; + return *s ? 
strlist__parse_list_entry(slist, s) : 0; } -struct strlist *strlist__new(bool dupstr, const char *slist) +struct strlist *strlist__new(bool dupstr, const char *list) { - struct strlist *self = malloc(sizeof(*self)); + struct strlist *slist = malloc(sizeof(*slist)); - if (self != NULL) { - rblist__init(&self->rblist); - self->rblist.node_cmp = strlist__node_cmp; - self->rblist.node_new = strlist__node_new; - self->rblist.node_delete = strlist__node_delete; + if (slist != NULL) { + rblist__init(&slist->rblist); + slist->rblist.node_cmp = strlist__node_cmp; + slist->rblist.node_new = strlist__node_new; + slist->rblist.node_delete = strlist__node_delete; - self->dupstr = dupstr; - if (slist && strlist__parse_list(self, slist) != 0) + slist->dupstr = dupstr; + if (slist && strlist__parse_list(slist, list) != 0) goto out_error; } - return self; + return slist; out_error: - free(self); + free(slist); return NULL; } -void strlist__delete(struct strlist *self) +void strlist__delete(struct strlist *slist) { - if (self != NULL) - rblist__delete(&self->rblist); + if (slist != NULL) + rblist__delete(&slist->rblist); } struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx) diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index dd9f922ec67c..5c7f87069d9c 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h @@ -17,34 +17,34 @@ struct strlist { }; struct strlist *strlist__new(bool dupstr, const char *slist); -void strlist__delete(struct strlist *self); +void strlist__delete(struct strlist *slist); -void strlist__remove(struct strlist *self, struct str_node *sn); -int strlist__load(struct strlist *self, const char *filename); -int strlist__add(struct strlist *self, const char *str); +void strlist__remove(struct strlist *slist, struct str_node *sn); +int strlist__load(struct strlist *slist, const char *filename); +int strlist__add(struct strlist *slist, const char *str); -struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); -struct str_node *strlist__find(struct strlist *self, const char *entry); +struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx); +struct str_node *strlist__find(struct strlist *slist, const char *entry); -static inline bool strlist__has_entry(struct strlist *self, const char *entry) +static inline bool strlist__has_entry(struct strlist *slist, const char *entry) { - return strlist__find(self, entry) != NULL; + return strlist__find(slist, entry) != NULL; } -static inline bool strlist__empty(const struct strlist *self) +static inline bool strlist__empty(const struct strlist *slist) { - return rblist__empty(&self->rblist); + return rblist__empty(&slist->rblist); } -static inline unsigned int strlist__nr_entries(const struct strlist *self) +static inline unsigned int strlist__nr_entries(const struct strlist *slist) { - return rblist__nr_entries(&self->rblist); + return rblist__nr_entries(&slist->rblist); } /* For strlist iteration */ -static inline struct str_node *strlist__first(struct strlist *self) +static inline struct str_node *strlist__first(struct strlist *slist) { - struct rb_node *rn = rb_first(&self->rblist.entries); + struct rb_node *rn = rb_first(&slist->rblist.entries); return rn ? 
rb_entry(rn, struct str_node, rb_node) : NULL; } static inline struct str_node *strlist__next(struct str_node *sn) @@ -59,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn) /** * strlist_for_each - iterate over a strlist * @pos: the &struct str_node to use as a loop cursor. - * @self: the &struct strlist for loop. + * @slist: the &struct strlist for loop. */ -#define strlist__for_each(pos, self) \ - for (pos = strlist__first(self); pos; pos = strlist__next(pos)) +#define strlist__for_each(pos, slist) \ + for (pos = strlist__first(slist); pos; pos = strlist__next(pos)) /** * strlist_for_each_safe - iterate over a strlist safe against removal of * str_node * @pos: the &struct str_node to use as a loop cursor. * @n: another &struct str_node to use as temporary storage. - * @self: the &struct strlist for loop. + * @slist: the &struct strlist for loop. */ -#define strlist__for_each_safe(pos, n, self) \ - for (pos = strlist__first(self), n = strlist__next(pos); pos;\ +#define strlist__for_each_safe(pos, n, slist) \ + for (pos = strlist__first(slist), n = strlist__next(pos); pos;\ pos = n, n = strlist__next(n)) -int strlist__parse_list(struct strlist *self, const char *s); +int strlist__parse_list(struct strlist *slist, const char *s); #endif /* __PERF_STRLIST_H */ -- cgit v1.2.3-59-g8ed1b From 237a7e04a1a4461843a998fae78517dbbd08602e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 21:59:59 -0300 Subject: perf tools: Stop using 'self' in map.[ch] As suggested by tglx, 'self' should be replaced by something that is more useful. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-vse2c54m0yahx6p79tmoel03@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 118 +++++++++++++++++++++++++------------------------- tools/perf/util/map.h | 24 +++++----- 2 files changed, 70 insertions(+), 72 deletions(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index ff94425779a2..b6b163642c7d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -29,29 +29,29 @@ static inline int is_no_dso_memory(const char *filename) !strcmp(filename, "[heap]"); } -void map__init(struct map *self, enum map_type type, +void map__init(struct map *map, enum map_type type, u64 start, u64 end, u64 pgoff, struct dso *dso) { - self->type = type; - self->start = start; - self->end = end; - self->pgoff = pgoff; - self->dso = dso; - self->map_ip = map__map_ip; - self->unmap_ip = map__unmap_ip; - RB_CLEAR_NODE(&self->rb_node); - self->groups = NULL; - self->referenced = false; - self->erange_warned = false; + map->type = type; + map->start = start; + map->end = end; + map->pgoff = pgoff; + map->dso = dso; + map->map_ip = map__map_ip; + map->unmap_ip = map__unmap_ip; + RB_CLEAR_NODE(&map->rb_node); + map->groups = NULL; + map->referenced = false; + map->erange_warned = false; } struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, u64 pgoff, u32 pid, char *filename, enum map_type type) { - struct map *self = malloc(sizeof(*self)); + struct map *map = malloc(sizeof(*map)); - if (self != NULL) { + if (map != NULL) { char newfilename[PATH_MAX]; struct dso *dso; int anon, no_dso, vdso; @@ -74,10 +74,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, if (dso == NULL) goto out_delete; - map__init(self, type, start, start + len, pgoff, dso); + map__init(map, type, 
start, start + len, pgoff, dso); if (anon || no_dso) { - self->map_ip = self->unmap_ip = identity__map_ip; + map->map_ip = map->unmap_ip = identity__map_ip; /* * Set memory without DSO as loaded. All map__find_* @@ -85,12 +85,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, * unnecessary map__load warning. */ if (no_dso) - dso__set_loaded(dso, self->type); + dso__set_loaded(dso, map->type); } } - return self; + return map; out_delete: - free(self); + free(map); return NULL; } @@ -113,48 +113,48 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type) return map; } -void map__delete(struct map *self) +void map__delete(struct map *map) { - free(self); + free(map); } -void map__fixup_start(struct map *self) +void map__fixup_start(struct map *map) { - struct rb_root *symbols = &self->dso->symbols[self->type]; + struct rb_root *symbols = &map->dso->symbols[map->type]; struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - self->start = sym->start; + map->start = sym->start; } } -void map__fixup_end(struct map *self) +void map__fixup_end(struct map *map) { - struct rb_root *symbols = &self->dso->symbols[self->type]; + struct rb_root *symbols = &map->dso->symbols[map->type]; struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - self->end = sym->end; + map->end = sym->end; } } #define DSO__DELETED "(deleted)" -int map__load(struct map *self, symbol_filter_t filter) +int map__load(struct map *map, symbol_filter_t filter) { - const char *name = self->dso->long_name; + const char *name = map->dso->long_name; int nr; - if (dso__loaded(self->dso, self->type)) + if (dso__loaded(map->dso, map->type)) return 0; - nr = dso__load(self->dso, self, filter); + nr = dso__load(map->dso, map, filter); if (nr < 0) { - if (self->dso->has_build_id) { + if (map->dso->has_build_id) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - build_id__sprintf(self->dso->build_id, - sizeof(self->dso->build_id), + build_id__sprintf(map->dso->build_id, + sizeof(map->dso->build_id), sbuild_id); pr_warning("%s with build id %s not found", name, sbuild_id); @@ -184,43 +184,41 @@ int map__load(struct map *self, symbol_filter_t filter) * Only applies to the kernel, as its symtabs aren't relative like the * module ones. 
*/ - if (self->dso->kernel) - map__reloc_vmlinux(self); + if (map->dso->kernel) + map__reloc_vmlinux(map); return 0; } -struct symbol *map__find_symbol(struct map *self, u64 addr, +struct symbol *map__find_symbol(struct map *map, u64 addr, symbol_filter_t filter) { - if (map__load(self, filter) < 0) + if (map__load(map, filter) < 0) return NULL; - return dso__find_symbol(self->dso, self->type, addr); + return dso__find_symbol(map->dso, map->type, addr); } -struct symbol *map__find_symbol_by_name(struct map *self, const char *name, +struct symbol *map__find_symbol_by_name(struct map *map, const char *name, symbol_filter_t filter) { - if (map__load(self, filter) < 0) + if (map__load(map, filter) < 0) return NULL; - if (!dso__sorted_by_name(self->dso, self->type)) - dso__sort_by_name(self->dso, self->type); + if (!dso__sorted_by_name(map->dso, map->type)) + dso__sort_by_name(map->dso, map->type); - return dso__find_symbol_by_name(self->dso, self->type, name); + return dso__find_symbol_by_name(map->dso, map->type, name); } -struct map *map__clone(struct map *self) +struct map *map__clone(struct map *map) { - struct map *map = malloc(sizeof(*self)); + struct map *clone = malloc(sizeof(*clone)); - if (!map) - return NULL; + if (clone != NULL) + memcpy(clone, map, sizeof(*clone)); - memcpy(map, self, sizeof(*self)); - - return map; + return clone; } int map__overlap(struct map *l, struct map *r) @@ -237,10 +235,10 @@ int map__overlap(struct map *l, struct map *r) return 0; } -size_t map__fprintf(struct map *self, FILE *fp) +size_t map__fprintf(struct map *map, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", - self->start, self->end, self->pgoff, self->dso->name); + map->start, map->end, map->pgoff, map->dso->name); } size_t map__fprintf_dsoname(struct map *map, FILE *fp) @@ -528,9 +526,9 @@ static u64 map__reloc_unmap_ip(struct map *map, u64 ip) return ip - (s64)map->pgoff; } -void map__reloc_vmlinux(struct map *self) +void map__reloc_vmlinux(struct map *map) { - struct kmap *kmap = map__kmap(self); + struct kmap *kmap = map__kmap(map); s64 reloc; if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) @@ -542,9 +540,9 @@ void map__reloc_vmlinux(struct map *self) if (!reloc) return; - self->map_ip = map__reloc_map_ip; - self->unmap_ip = map__reloc_unmap_ip; - self->pgoff = reloc; + map->map_ip = map__reloc_map_ip; + map->unmap_ip = map__reloc_unmap_ip; + map->pgoff = reloc; } void maps__insert(struct rb_root *maps, struct map *map) @@ -567,9 +565,9 @@ void maps__insert(struct rb_root *maps, struct map *map) rb_insert_color(&map->rb_node, maps); } -void maps__remove(struct rb_root *self, struct map *map) +void maps__remove(struct rb_root *maps, struct map *map) { - rb_erase(&map->rb_node, self); + rb_erase(&map->rb_node, maps); } struct map *maps__find(struct rb_root *maps, u64 ip) diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index bcb39e2a6965..a887f2c9dfbb 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -57,9 +57,9 @@ struct map_groups { struct machine *machine; }; -static inline struct kmap *map__kmap(struct map *self) +static inline struct kmap *map__kmap(struct map *map) { - return (struct kmap *)(self + 1); + return (struct kmap *)(map + 1); } static inline u64 map__map_ip(struct map *map, u64 ip) @@ -85,27 +85,27 @@ struct symbol; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); -void map__init(struct map *self, enum map_type type, +void map__init(struct map *map, enum map_type type, u64 start, u64 
end, u64 pgoff, struct dso *dso); struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, u64 pgoff, u32 pid, char *filename, enum map_type type); struct map *map__new2(u64 start, struct dso *dso, enum map_type type); -void map__delete(struct map *self); -struct map *map__clone(struct map *self); +void map__delete(struct map *map); +struct map *map__clone(struct map *map); int map__overlap(struct map *l, struct map *r); -size_t map__fprintf(struct map *self, FILE *fp); +size_t map__fprintf(struct map *map, FILE *fp); size_t map__fprintf_dsoname(struct map *map, FILE *fp); -int map__load(struct map *self, symbol_filter_t filter); -struct symbol *map__find_symbol(struct map *self, +int map__load(struct map *map, symbol_filter_t filter); +struct symbol *map__find_symbol(struct map *map, u64 addr, symbol_filter_t filter); -struct symbol *map__find_symbol_by_name(struct map *self, const char *name, +struct symbol *map__find_symbol_by_name(struct map *map, const char *name, symbol_filter_t filter); -void map__fixup_start(struct map *self); -void map__fixup_end(struct map *self); +void map__fixup_start(struct map *map); +void map__fixup_end(struct map *map); -void map__reloc_vmlinux(struct map *self); +void map__reloc_vmlinux(struct map *map); size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp); -- cgit v1.2.3-59-g8ed1b From 8e16017d497e9bb37c8c3c5ed1edb8d6adeebf3a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 22:16:43 -0300 Subject: perf tools: Use memdup in map__clone We have memdup() exactly for that, remove open coded dup. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-tnsoexrgv6u9l125srq2c7su@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index b6b163642c7d..6fcb9de62340 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -11,6 +11,7 @@ #include "strlist.h" #include "vdso.h" #include "build-id.h" +#include const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", @@ -213,12 +214,7 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name, struct map *map__clone(struct map *map) { - struct map *clone = malloc(sizeof(*clone)); - - if (clone != NULL) - memcpy(clone, map, sizeof(*clone)); - - return clone; + return memdup(map, sizeof(*map)); } int map__overlap(struct map *l, struct map *r) -- cgit v1.2.3-59-g8ed1b From 8d9233f205e8855dc762665e28012354cd46af45 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 22:24:57 -0300 Subject: perf kmem: Use memdup() Instead of hand coded equivalent. 
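For reference, memdup() is the tools-tree helper with plain malloc-plus-copy semantics, roughly (a paraphrase, not the exact implementation):

	static void *memdup(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p != NULL)
			memcpy(p, src, len);
		return p;
	}

which is why the error string changes from "malloc failed" to "memdup failed" in the hunk below.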
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-42ldngi973f4ssvzlklo8t2k@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-kmem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index c746108c5d48..46878daca5cc 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -17,6 +17,7 @@ #include "util/debug.h" #include +#include struct alloc_stat; typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); @@ -618,12 +619,11 @@ static int sort_dimension__add(const char *tok, struct list_head *list) for (i = 0; i < NUM_AVAIL_SORTS; i++) { if (!strcmp(avail_sorts[i]->name, tok)) { - sort = malloc(sizeof(*sort)); + sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i])); if (!sort) { - pr_err("%s: malloc failed\n", __func__); + pr_err("%s: memdup failed\n", __func__); return -1; } - memcpy(sort, avail_sorts[i], sizeof(*sort)); list_add_tail(&sort->list, list); return 0; } -- cgit v1.2.3-59-g8ed1b From 2caa48a24061b1f8e8dab43ea3292a608a15e3c9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 22:34:33 -0300 Subject: perf header: Stop using die() calls when processing tracing data The callers of this function (perf_event__process_tracing_data) already handle a negative return value as an error, so just use pr_err() to log the problem and return -1 instead of panic'ing. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-eeeljnecpi0zi5s7ux1mzdv9@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index fccd69dbbbb9..e17a8fe9c4d7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2921,16 +2921,22 @@ int perf_event__process_tracing_data(union perf_event *event, session->repipe); padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; - if (readn(session->fd, buf, padding) < 0) - die("reading input file"); + if (readn(session->fd, buf, padding) < 0) { + pr_err("%s: reading input file", __func__); + return -1; + } if (session->repipe) { int retw = write(STDOUT_FILENO, buf, padding); - if (retw <= 0 || retw != padding) - die("repiping tracing data padding"); + if (retw <= 0 || retw != padding) { + pr_err("%s: repiping tracing data padding", __func__); + return -1; + } } - if (size_read + padding != size) - die("tracing data size mismatch"); + if (size_read + padding != size) { + pr_err("%s: tracing data size mismatch", __func__); + return -1; + } perf_evlist__prepare_tracepoint_events(session->evlist, session->pevent); -- cgit v1.2.3-59-g8ed1b From 56ab7140325c835c8bb53cf3bca0cf7b6e967f15 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 22:45:21 -0300 Subject: perf ui browser: Free browser->helpline() on ui_browser__hide() It is allocated at ui_browser__show(), so free it in its counterpart, ui_browser__hide().
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-g449kvnbcpli4ceyxbe2jp1e@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browser.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 588bcb2d008b..809ea4632a34 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -273,6 +273,8 @@ void ui_browser__hide(struct ui_browser *browser __maybe_unused) { pthread_mutex_lock(&ui__lock); ui_helpline__pop(); + free(browser->helpline); + browser->helpline = NULL; pthread_mutex_unlock(&ui__lock); } -- cgit v1.2.3-59-g8ed1b From c0aab59f67ef47e52d151464c8dd16e7ae58d053 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Jan 2013 23:01:50 -0300 Subject: perf tests: Call machine__exit in the vmlinux matches kallsyms test Fixes leaks found with valgrind. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-x9hja7wxwexe8ca9v2j8qtlg@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/vmlinux-kallsyms.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index a1a8442829b4..7b4c4d26d1ba 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -44,7 +44,7 @@ int test__vmlinux_matches_kallsyms(void) */ if (machine__create_kernel_maps(&kallsyms) < 0) { pr_debug("machine__create_kernel_maps "); - return -1; + goto out; } /* @@ -227,5 +227,7 @@ detour: map__fprintf(pos, stderr); } out: + machine__exit(&kallsyms); + machine__exit(&vmlinux); return err; } -- cgit v1.2.3-59-g8ed1b From d75f717e19fe595e7efbf67de195ada8d89dfbbe Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 25 Jan 2013 09:46:36 -0500 Subject: tracing: Remove tracepoint sample code The tracepoint sample code was used to teach developers how to create their own tracepoints. But trace_events have since been added as a higher-level interface that developers use directly today. Only the trace_event code should use the tracepoint interface directly and no new tracepoints should be added. Besides, the example had a race condition with the use of the ->d_name.name dentry field, as pointed out by Al Viro. Best just to remove the code so it won't be used by other developers.
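For comparison, the trace_events interface mentioned above (see samples/trace_events/ in the tree) defines a tracepoint declaratively; a minimal illustrative definition, not taken from the tree, would look like:

	TRACE_EVENT(subsys_event,

		TP_PROTO(int value),

		TP_ARGS(value),

		TP_STRUCT__entry(
			__field(int, value)
		),

		TP_fast_assign(
			__entry->value = value;
		),

		TP_printk("value=%d", __entry->value)
	);

TRACE_EVENT() generates the registration hooks, the ring buffer record layout and the format file in one go, which is what makes raw DECLARE_TRACE()/DEFINE_TRACE() usage like the removed samples unnecessary.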
Link: http://lkml.kernel.org/r/20130123225523.GY4939@ZenIV.linux.org.uk Cc: Al Viro Acked-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- samples/Kconfig | 6 --- samples/Makefile | 2 +- samples/tracepoints/Makefile | 6 --- samples/tracepoints/tp-samples-trace.h | 11 ----- samples/tracepoints/tracepoint-probe-sample.c | 57 -------------------------- samples/tracepoints/tracepoint-probe-sample2.c | 44 -------------------- samples/tracepoints/tracepoint-sample.c | 57 -------------------------- 7 files changed, 1 insertion(+), 182 deletions(-) delete mode 100644 samples/tracepoints/Makefile delete mode 100644 samples/tracepoints/tp-samples-trace.h delete mode 100644 samples/tracepoints/tracepoint-probe-sample.c delete mode 100644 samples/tracepoints/tracepoint-probe-sample2.c delete mode 100644 samples/tracepoints/tracepoint-sample.c diff --git a/samples/Kconfig b/samples/Kconfig index 7b6792a18c05..6181c2cc9ca0 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -5,12 +5,6 @@ menuconfig SAMPLES if SAMPLES -config SAMPLE_TRACEPOINTS - tristate "Build tracepoints examples -- loadable modules only" - depends on TRACEPOINTS && m - help - This build tracepoints example modules. - config SAMPLE_TRACE_EVENTS tristate "Build trace_events examples -- loadable modules only" depends on EVENT_TRACING && m diff --git a/samples/Makefile b/samples/Makefile index 5ef08bba96ce..1a60c62e2045 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -1,4 +1,4 @@ # Makefile for Linux samples code -obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ \ +obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ \ hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ diff --git a/samples/tracepoints/Makefile b/samples/tracepoints/Makefile deleted file mode 100644 index 36479ad9ae14..000000000000 --- a/samples/tracepoints/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# builds the tracepoint example kernel modules; -# then to use one (as root): insmod - -obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o -obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o -obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h deleted file mode 100644 index 4d46be965961..000000000000 --- a/samples/tracepoints/tp-samples-trace.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _TP_SAMPLES_TRACE_H -#define _TP_SAMPLES_TRACE_H - -#include /* for struct inode and struct file */ -#include - -DECLARE_TRACE(subsys_event, - TP_PROTO(struct inode *inode, struct file *file), - TP_ARGS(inode, file)); -DECLARE_TRACE_NOARGS(subsys_eventb); -#endif diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c deleted file mode 100644 index 744c0b9652a7..000000000000 --- a/samples/tracepoints/tracepoint-probe-sample.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * tracepoint-probe-sample.c - * - * sample tracepoint probes. - */ - -#include -#include -#include -#include "tp-samples-trace.h" - -/* - * Here the caller only guarantees locking for struct file and struct inode. - * Locking must therefore be done in the probe to use the dentry. 
- */ -static void probe_subsys_event(void *ignore, - struct inode *inode, struct file *file) -{ - path_get(&file->f_path); - dget(file->f_path.dentry); - printk(KERN_INFO "Event is encountered with filename %s\n", - file->f_path.dentry->d_name.name); - dput(file->f_path.dentry); - path_put(&file->f_path); -} - -static void probe_subsys_eventb(void *ignore) -{ - printk(KERN_INFO "Event B is encountered\n"); -} - -static int __init tp_sample_trace_init(void) -{ - int ret; - - ret = register_trace_subsys_event(probe_subsys_event, NULL); - WARN_ON(ret); - ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL); - WARN_ON(ret); - - return 0; -} - -module_init(tp_sample_trace_init); - -static void __exit tp_sample_trace_exit(void) -{ - unregister_trace_subsys_eventb(probe_subsys_eventb, NULL); - unregister_trace_subsys_event(probe_subsys_event, NULL); - tracepoint_synchronize_unregister(); -} - -module_exit(tp_sample_trace_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Tracepoint Probes Samples"); diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c deleted file mode 100644 index 9fcf990e5d4b..000000000000 --- a/samples/tracepoints/tracepoint-probe-sample2.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * tracepoint-probe-sample2.c - * - * 2nd sample tracepoint probes. - */ - -#include -#include -#include "tp-samples-trace.h" - -/* - * Here the caller only guarantees locking for struct file and struct inode. - * Locking must therefore be done in the probe to use the dentry. - */ -static void probe_subsys_event(void *ignore, - struct inode *inode, struct file *file) -{ - printk(KERN_INFO "Event is encountered with inode number %lu\n", - inode->i_ino); -} - -static int __init tp_sample_trace_init(void) -{ - int ret; - - ret = register_trace_subsys_event(probe_subsys_event, NULL); - WARN_ON(ret); - - return 0; -} - -module_init(tp_sample_trace_init); - -static void __exit tp_sample_trace_exit(void) -{ - unregister_trace_subsys_event(probe_subsys_event, NULL); - tracepoint_synchronize_unregister(); -} - -module_exit(tp_sample_trace_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Tracepoint Probes Samples"); diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c deleted file mode 100644 index f4d89e008c32..000000000000 --- a/samples/tracepoints/tracepoint-sample.c +++ /dev/null @@ -1,57 +0,0 @@ -/* tracepoint-sample.c - * - * Executes a tracepoint when /proc/tracepoint-sample is opened. - * - * (C) Copyright 2007 Mathieu Desnoyers - * - * This file is released under the GPLv2. - * See the file COPYING for more details. 
- */ - -#include -#include -#include -#include "tp-samples-trace.h" - -DEFINE_TRACE(subsys_event); -DEFINE_TRACE(subsys_eventb); - -struct proc_dir_entry *pentry_sample; - -static int my_open(struct inode *inode, struct file *file) -{ - int i; - - trace_subsys_event(inode, file); - for (i = 0; i < 10; i++) - trace_subsys_eventb(); - return -EPERM; -} - -static const struct file_operations mark_ops = { - .open = my_open, - .llseek = noop_llseek, -}; - -static int __init sample_init(void) -{ - printk(KERN_ALERT "sample init\n"); - pentry_sample = proc_create("tracepoint-sample", 0444, NULL, - &mark_ops); - if (!pentry_sample) - return -EPERM; - return 0; -} - -static void __exit sample_exit(void) -{ - printk(KERN_ALERT "sample exit\n"); - remove_proc_entry("tracepoint-sample", NULL); -} - -module_init(sample_init) -module_exit(sample_exit) - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Tracepoint sample"); -- cgit v1.2.3-59-g8ed1b From 821465295b36136998ef294fe176fba4e09c1cd9 Mon Sep 17 00:00:00 2001 From: Shan Wei Date: Mon, 19 Nov 2012 13:21:01 +0800 Subject: tracing: Use __this_cpu_inc/dec operation instead of __get_cpu_var __this_cpu_inc_return() or __this_cpu_dec generates a single instruction, which is faster than __get_cpu_var operation. Link: http://lkml.kernel.org/r/50A9C1BD.1060308@gmail.com Reviewed-by: Christoph Lameter Signed-off-by: Shan Wei Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ca9b7dfed8ef..07888e15c694 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1344,7 +1344,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, */ preempt_disable_notrace(); - use_stack = ++__get_cpu_var(ftrace_stack_reserve); + use_stack = __this_cpu_inc_return(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would @@ -1398,7 +1398,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, out: /* Again, don't let gcc optimize things here */ barrier(); - __get_cpu_var(ftrace_stack_reserve)--; + __this_cpu_dec(ftrace_stack_reserve); preempt_enable_notrace(); } -- cgit v1.2.3-59-g8ed1b From 38dbe0b137bfe6ea92be495017885c0785179a02 Mon Sep 17 00:00:00 2001 From: Jovi Zhang Date: Fri, 25 Jan 2013 18:03:07 +0800 Subject: tracing: Remove second iterator initializer The trace iterator is already initialized by trace_init_global_iter(), so there is no need to initialize it again. 
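For reference, trace_init_global_iter() already does roughly the following (paraphrased from this era's kernel/trace/trace.c; the exact body may differ slightly):

	static void trace_init_global_iter(struct trace_iterator *iter)
	{
		iter->tr = &global_trace;
		iter->trace = current_trace;
		iter->cpu_file = TRACE_PIPE_ALL_CPU;
	}

so the two assignments removed below were pure duplication.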
Link: http://lkml.kernel.org/r/CACV3sb+G1YnO6168JhY3dEadmJi58pA5-2cSZT8E0WVHJNFt9Q@mail.gmail.com Signed-off-by: Jovi Zhang Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 07888e15c694..d399592701aa 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5030,6 +5030,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) if (disable_tracing) ftrace_kill(); + /* Simulate the iterator */ trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { @@ -5041,10 +5042,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) /* don't look at user memory in panic mode */ trace_flags &= ~TRACE_ITER_SYM_USEROBJ; - /* Simulate the iterator */ - iter.tr = &global_trace; - iter.trace = current_trace; - switch (oops_dump_mode) { case DUMP_ALL: iter.cpu_file = TRACE_PIPE_ALL_CPU; -- cgit v1.2.3-59-g8ed1b From 03274a3ffb449632970fdd35da72ea41cf8474da Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 29 Jan 2013 17:30:31 -0500 Subject: tracing/fgraph: Adjust fgraph depth before calling trace return callback While debugging the virtual cputime with the function graph tracer with a max_depth of 1 (most common use of the max_depth so far), I found that I was missing kernel execution because of a race condition. The code for the return side of the function has a slight race: ftrace_pop_return_trace(&trace, &ret, frame_pointer); trace.rettime = trace_clock_local(); ftrace_graph_return(&trace); barrier(); current->curr_ret_stack--; The ftrace_pop_return_trace() initializes the trace structure for the callback. The ftrace_graph_return() uses the trace structure for its own use as that structure is on the stack and is local to this function. Then the curr_ret_stack is decremented which is what the trace.depth is set to. If an interrupt comes in after the ftrace_graph_return() but before the curr_ret_stack, then the called function will get a depth of 2. If max_depth is set to 1 this function will be ignored. The problem is that the trace has already been called, and the timestamp for that trace will not reflect the time the function was about to re-enter userspace. Calls to the interrupt will not be traced because the max_depth has prevented this. To solve this issue, the ftrace_graph_return() can safely be moved after the current->curr_ret_stack has been updated. This way the timestamp for the return callback will reflect the actual time. If an interrupt comes in after the curr_ret_stack update and ftrace_graph_return(), it will be traced. It may look a little confusing to see it within the other function, but at least it will not be lost. Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 7008d2e13cf2..39ada66389cc 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -191,10 +191,16 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer) ftrace_pop_return_trace(&trace, &ret, frame_pointer); trace.rettime = trace_clock_local(); - ftrace_graph_return(&trace); barrier(); current->curr_ret_stack--; + /* + * The trace should run after decrementing the ret counter + * in case an interrupt were to come in. We don't want to + * lose the interrupt if max_depth is set. 
+ */ + ftrace_graph_return(&trace); + if (unlikely(!ret)) { ftrace_graph_stop(); WARN_ON(1); -- cgit v1.2.3-59-g8ed1b From ec13abc37f71c89189350dce491189fb5b659184 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 25 Jan 2013 13:10:00 -0300 Subject: perf tests: Fix leaks on PERF_RECORD_* test This test: 7: Validate PERF_RECORD_* events & perf_sample fields needs to call perf_evlist__delete_maps(). Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-t3181qy15avffdacqjcxfku2@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/perf-record.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index 6ea66cf6791b..1e8e5128d0da 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -96,7 +96,7 @@ int test__PERF_RECORD(void) err = perf_evlist__prepare_workload(evlist, &opts, argv); if (err < 0) { pr_debug("Couldn't run the workload!\n"); - goto out_delete_evlist; + goto out_delete_maps; } /* @@ -111,7 +111,7 @@ int test__PERF_RECORD(void) err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); if (err < 0) { pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); - goto out_delete_evlist; + goto out_delete_maps; } cpu = err; @@ -121,7 +121,7 @@ int test__PERF_RECORD(void) */ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { pr_debug("sched_setaffinity: %s\n", strerror(errno)); - goto out_delete_evlist; + goto out_delete_maps; } /* @@ -131,7 +131,7 @@ int test__PERF_RECORD(void) err = perf_evlist__open(evlist); if (err < 0) { pr_debug("perf_evlist__open: %s\n", strerror(errno)); - goto out_delete_evlist; + goto out_delete_maps; } /* @@ -142,7 +142,7 @@ int test__PERF_RECORD(void) err = perf_evlist__mmap(evlist, opts.mmap_pages, false); if (err < 0) { pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); - goto out_delete_evlist; + goto out_delete_maps; } /* @@ -305,6 +305,8 @@ found_exit: } out_err: perf_evlist__munmap(evlist); +out_delete_maps: + perf_evlist__delete_maps(evlist); out_delete_evlist: perf_evlist__delete(evlist); out: -- cgit v1.2.3-59-g8ed1b From 7e010562e01aff929126f671ff9e730e22dbdb1b Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 29 Jan 2013 11:48:11 +0100 Subject: tools: Correct typo in tools Makefile It should be make -C tools/ _install Signed-off-by: Borislav Petkov Cc: Ingo Molnar Link: http://lkml.kernel.org/r/1359456492-22156-1-git-send-email-bp@alien8.de Signed-off-by: Arnaldo Carvalho de Melo --- tools/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/Makefile b/tools/Makefile index 1f9a529fe544..798fa0ef048e 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -15,7 +15,7 @@ help: @echo ' x86_energy_perf_policy - Intel energy policy tool' @echo '' @echo 'You can do:' - @echo ' $$ make -C tools/_install' + @echo ' $$ make -C tools/ _install' @echo '' @echo ' from the kernel command line to build and install one of' @echo ' the tools above' -- cgit v1.2.3-59-g8ed1b From 1c13f3c9042f9d222959af7c9da6db93ea9f7e4c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Dec 2012 13:51:59 +0100 Subject: perf: Add 'perf bench numa mem' NUMA performance measurement suite Add a suite of NUMA performance benchmarks. 
The goal was to simulate the behavior and access patterns of real NUMA workloads, via a wide range of parameters, so this tool goes well beyond simple bzero() measurements that most NUMA micro-benchmarks use:

 - It processes the data and creates a chain of data dependencies, like a real workload would. Neither the compiler, nor the kernel (via KSM and other optimizations) nor the CPU can eliminate parts of the workload.

 - It randomizes the initial state and also randomizes the target addresses of the processing - it's not a simple forward scan of addresses.

 - It provides flexible options to set process, thread and memory relationship information: -G sets "global" memory shared between all test processes, -P sets "process" memory shared by all threads of a process and -T sets "thread" private memory.

 - There's a NUMA convergence monitoring and convergence latency measurement option via -c and -m.

 - Micro-sleeps and synchronization can be injected to provoke lock contention and scheduling, via the -u and -S options. This simulates IO and contention.

 - The -x option instructs the workload to 'perturb' itself artificially every N seconds, by moving to the first and last CPU of the system periodically. This way the stability of convergence equilibrium and the number of steps taken for the scheduler to reach equilibrium again can be measured.

 - The amount of work can be specified via the -l loop count, and/or via a -s seconds-timeout value.

 - CPU and node memory binding options, to test hard binding scenarios. THP can be turned on and off via madvise() calls.

 - Live reporting of convergence progress in an 'at a glance' output format. Printing of convergence and deconvergence events.

The 'perf bench numa mem -a' option will start an array of about 30 individual tests that will each output such measurements:

 # Running 5x5-bw-thread, "perf bench numa mem -p 5 -t 5 -P 512 -s 20 -zZ0q --thp 1"
 5x5-bw-thread, 20.276, secs, runtime-max/thread
 5x5-bw-thread, 20.004, secs, runtime-min/thread
 5x5-bw-thread, 20.155, secs, runtime-avg/thread
 5x5-bw-thread, 0.671, %, spread-runtime/thread
 5x5-bw-thread, 21.153, GB, data/thread
 5x5-bw-thread, 528.818, GB, data-total
 5x5-bw-thread, 0.959, nsecs, runtime/byte/thread
 5x5-bw-thread, 1.043, GB/sec, thread-speed
 5x5-bw-thread, 26.081, GB/sec, total-speed

See the help text and the code for more details.
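As a quick usage illustration (hypothetical parameters, with bundled short options as in the example above): a smaller smoke-test run with convergence measurement enabled could be

	perf bench numa mem -p 2 -t 2 -P 256 -s 10 -zZcm

i.e. 2 processes of 2 threads each, 256 MB of process memory, data zeroed at init and accessed via bzero, stopping after at most 10 seconds while printing and measuring convergence.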
Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Steven Rostedt Cc: Linus Torvalds Cc: Andrew Morton Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Mel Gorman Cc: Hugh Dickins Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 3 +- tools/perf/bench/bench.h | 1 + tools/perf/bench/numa.c | 1731 ++++++++++++++++++++++++++++++++++++++++++++ tools/perf/builtin-bench.c | 13 + 4 files changed, 1747 insertions(+), 1 deletion(-) create mode 100644 tools/perf/bench/numa.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index a84021abb3fe..b62dbc0d974a 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -103,7 +103,7 @@ ifdef PARSER_DEBUG endif CFLAGS = -fno-omit-frame-pointer -ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) -EXTLIBS = -lpthread -lrt -lelf -lm +EXTLIBS = -lpthread -lrt -lelf -lm -lnuma ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE ALL_LDFLAGS = $(LDFLAGS) STRIP ?= strip @@ -492,6 +492,7 @@ LIB_OBJS += $(OUTPUT)tests/python-use.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o # Benchmark modules +BUILTIN_OBJS += $(OUTPUT)bench/numa.o BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o ifeq ($(RAW_ARCH),x86_64) diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index 8f89998eeaf4..a5223e6a7b43 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -1,6 +1,7 @@ #ifndef BENCH_H #define BENCH_H +extern int bench_numa(int argc, const char **argv, const char *prefix); extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); extern int bench_mem_memcpy(int argc, const char **argv, diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c new file mode 100644 index 000000000000..30d1c3225b46 --- /dev/null +++ b/tools/perf/bench/numa.c @@ -0,0 +1,1731 @@ +/* + * numa.c + * + * numa: Simulate NUMA-sensitive workload and measure their NUMA performance + */ + +#include "../perf.h" +#include "../builtin.h" +#include "../util/util.h" +#include "../util/parse-options.h" + +#include "bench.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Regular printout to the terminal, supressed if -q is specified: + */ +#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0) + +/* + * Debug printf: + */ +#define dprintf(x...) 
do { if (g && g->p.show_details >= 1) printf(x); } while (0) + +struct thread_data { + int curr_cpu; + cpu_set_t bind_cpumask; + int bind_node; + u8 *process_data; + int process_nr; + int thread_nr; + int task_nr; + unsigned int loops_done; + u64 val; + u64 runtime_ns; + pthread_mutex_t *process_lock; +}; + +/* Parameters set by options: */ + +struct params { + /* Startup synchronization: */ + bool serialize_startup; + + /* Task hierarchy: */ + int nr_proc; + int nr_threads; + + /* Working set sizes: */ + const char *mb_global_str; + const char *mb_proc_str; + const char *mb_proc_locked_str; + const char *mb_thread_str; + + double mb_global; + double mb_proc; + double mb_proc_locked; + double mb_thread; + + /* Access patterns to the working set: */ + bool data_reads; + bool data_writes; + bool data_backwards; + bool data_zero_memset; + bool data_rand_walk; + u32 nr_loops; + u32 nr_secs; + u32 sleep_usecs; + + /* Working set initialization: */ + bool init_zero; + bool init_random; + bool init_cpu0; + + /* Misc options: */ + int show_details; + int run_all; + int thp; + + long bytes_global; + long bytes_process; + long bytes_process_locked; + long bytes_thread; + + int nr_tasks; + bool show_quiet; + + bool show_convergence; + bool measure_convergence; + + int perturb_secs; + int nr_cpus; + int nr_nodes; + + /* Affinity options -C and -M: */ + char *cpu_list_str; + char *node_list_str; +}; + + +/* Global, read-writable area, accessible to all processes and threads: */ + +struct global_info { + u8 *data; + + pthread_mutex_t startup_mutex; + int nr_tasks_started; + + pthread_mutex_t startup_done_mutex; + + pthread_mutex_t start_work_mutex; + int nr_tasks_working; + + pthread_mutex_t stop_work_mutex; + u64 bytes_done; + + struct thread_data *threads; + + /* Convergence latency measurement: */ + bool all_converged; + bool stop_work; + + int print_once; + + struct params p; +}; + +static struct global_info *g = NULL; + +static int parse_cpus_opt(const struct option *opt, const char *arg, int unset); +static int parse_nodes_opt(const struct option *opt, const char *arg, int unset); + +struct params p0; + +static const struct option options[] = { + OPT_INTEGER('p', "nr_proc" , &p0.nr_proc, "number of processes"), + OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"), + + OPT_STRING('G', "mb_global" , &p0.mb_global_str, "MB", "global memory (MBs)"), + OPT_STRING('P', "mb_proc" , &p0.mb_proc_str, "MB", "process memory (MBs)"), + OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"), + OPT_STRING('T', "mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"), + + OPT_UINTEGER('l', "nr_loops" , &p0.nr_loops, "max number of loops to run"), + OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run"), + OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"), + + OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via reads (can be mixed with -W)"), + OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"), + OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"), + OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"), + OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk, "access the data with random (32bit LFSR) walk"), + + + OPT_BOOLEAN('z', "init_zero" , &p0.init_zero, "bzero the initial allocations"), + 
OPT_BOOLEAN('I', "init_random" , &p0.init_random, "randomize the contents of the initial allocations"), + OPT_BOOLEAN('0', "init_cpu0" , &p0.init_cpu0, "do the initial allocations on CPU#0"), + OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs, "perturb thread 0/0 every X secs, to test convergence stability"), + + OPT_INCR ('d', "show_details" , &p0.show_details, "Show details"), + OPT_INCR ('a', "all" , &p0.run_all, "Run all tests in the suite"), + OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"), + OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"), + OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"), + OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"), + OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"), + + /* Special option string parsing callbacks: */ + OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]", + "bind the first N tasks to these specific cpus (the rest is unbound)", + parse_cpus_opt), + OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]", + "bind the first N tasks to these specific memory nodes (the rest is unbound)", + parse_nodes_opt), + OPT_END() +}; + +static const char * const bench_numa_usage[] = { + "perf bench numa <options>", + NULL +}; + +static const char * const numa_usage[] = { + "perf bench numa mem [<options>]", + NULL +}; + +static cpu_set_t bind_to_cpu(int target_cpu) +{ + cpu_set_t orig_mask, mask; + int ret; + + ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); + BUG_ON(ret); + + CPU_ZERO(&mask); + + if (target_cpu == -1) { + int cpu; + + for (cpu = 0; cpu < g->p.nr_cpus; cpu++) + CPU_SET(cpu, &mask); + } else { + BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus); + CPU_SET(target_cpu, &mask); + } + + ret = sched_setaffinity(0, sizeof(mask), &mask); + BUG_ON(ret); + + return orig_mask; +} + +static cpu_set_t bind_to_node(int target_node) +{ + int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; + cpu_set_t orig_mask, mask; + int cpu; + int ret; + + BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); + BUG_ON(!cpus_per_node); + + ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); + BUG_ON(ret); + + CPU_ZERO(&mask); + + if (target_node == -1) { + for (cpu = 0; cpu < g->p.nr_cpus; cpu++) + CPU_SET(cpu, &mask); + } else { + int cpu_start = (target_node + 0) * cpus_per_node; + int cpu_stop = (target_node + 1) * cpus_per_node; + + BUG_ON(cpu_stop > g->p.nr_cpus); + + for (cpu = cpu_start; cpu < cpu_stop; cpu++) + CPU_SET(cpu, &mask); + } + + ret = sched_setaffinity(0, sizeof(mask), &mask); + BUG_ON(ret); + + return orig_mask; +} + +static void bind_to_cpumask(cpu_set_t mask) +{ + int ret; + + ret = sched_setaffinity(0, sizeof(mask), &mask); + BUG_ON(ret); +} + +static void mempol_restore(void) +{ + int ret; + + ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1); + + BUG_ON(ret); +} + +static void bind_to_memnode(int node) +{ + unsigned long nodemask; + int ret; + + if (node == -1) + return; + + BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)); + nodemask = 1L << node; + + ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8); + dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret); + + BUG_ON(ret); +} + +#define HPSIZE (2*1024*1024) + +#define set_taskname(fmt...) 
\ +do { \ + char name[20]; \ + \ + snprintf(name, 20, fmt); \ + prctl(PR_SET_NAME, name); \ +} while (0) + +static u8 *alloc_data(ssize_t bytes0, int map_flags, + int init_zero, int init_cpu0, int thp, int init_random) +{ + cpu_set_t orig_mask; + ssize_t bytes; + u8 *buf; + int ret; + + if (!bytes0) + return NULL; + + /* Allocate and initialize all memory on CPU#0: */ + if (init_cpu0) { + orig_mask = bind_to_node(0); + bind_to_memnode(0); + } + + bytes = bytes0 + HPSIZE; + + buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0); + BUG_ON(buf == (void *)-1); + + if (map_flags == MAP_PRIVATE) { + if (thp > 0) { + ret = madvise(buf, bytes, MADV_HUGEPAGE); + if (ret && !g->print_once) { + g->print_once = 1; + printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n"); + } + } + if (thp < 0) { + ret = madvise(buf, bytes, MADV_NOHUGEPAGE); + if (ret && !g->print_once) { + g->print_once = 1; + printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n"); + } + } + } + + if (init_zero) { + bzero(buf, bytes); + } else { + /* Initialize random contents, different in each word: */ + if (init_random) { + u64 *wbuf = (void *)buf; + long off = rand(); + long i; + + for (i = 0; i < bytes/8; i++) + wbuf[i] = i + off; + } + } + + /* Align to 2MB boundary: */ + buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1)); + + /* Restore affinity: */ + if (init_cpu0) { + bind_to_cpumask(orig_mask); + mempol_restore(); + } + + return buf; +} + +static void free_data(void *data, ssize_t bytes) +{ + int ret; + + if (!data) + return; + + ret = munmap(data, bytes); + BUG_ON(ret); +} + +/* + * Create a shared memory buffer that can be shared between processes, zeroed: + */ +static void * zalloc_shared_data(ssize_t bytes) +{ + return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random); +} + +/* + * Create a shared memory buffer that can be shared between processes: + */ +static void * setup_shared_data(ssize_t bytes) +{ + return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); +} + +/* + * Allocate process-local memory - this will either be shared between + * threads of this process, or only be accessed by this thread: + */ +static void * setup_private_data(ssize_t bytes) +{ + return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); +} + +/* + * Return a process-shared (global) mutex: + */ +static void init_global_mutex(pthread_mutex_t *mutex) +{ + pthread_mutexattr_t attr; + + pthread_mutexattr_init(&attr); + pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED); + pthread_mutex_init(mutex, &attr); +} + +static int parse_cpu_list(const char *arg) +{ + p0.cpu_list_str = strdup(arg); + + dprintf("got CPU list: {%s}\n", p0.cpu_list_str); + + return 0; +} + +static void parse_setup_cpu_list(void) +{ + struct thread_data *td; + char *str0, *str; + int t; + + if (!g->p.cpu_list_str) + return; + + dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); + + str0 = str = strdup(g->p.cpu_list_str); + t = 0; + + BUG_ON(!str); + + tprintf("# binding tasks to CPUs:\n"); + tprintf("# "); + + while (true) { + int bind_cpu, bind_cpu_0, bind_cpu_1; + char *tok, *tok_end, *tok_step, *tok_len, *tok_mul; + int bind_len; + int step; + int mul; + + tok = strsep(&str, ","); + if (!tok) + break; + + tok_end = strstr(tok, "-"); + + dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end); + if (!tok_end) { + /* Single CPU specified: */ + bind_cpu_0 = bind_cpu_1 = 
atol(tok); + } else { + /* CPU range specified (for example: "5-11"): */ + bind_cpu_0 = atol(tok); + bind_cpu_1 = atol(tok_end + 1); + } + + step = 1; + tok_step = strstr(tok, "#"); + if (tok_step) { + step = atol(tok_step + 1); + BUG_ON(step <= 0 || step >= g->p.nr_cpus); + } + + /* + * Mask length. + * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4', + * where the _4 means the next 4 CPUs are allowed. + */ + bind_len = 1; + tok_len = strstr(tok, "_"); + if (tok_len) { + bind_len = atol(tok_len + 1); + BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus); + } + + /* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */ + mul = 1; + tok_mul = strstr(tok, "x"); + if (tok_mul) { + mul = atol(tok_mul + 1); + BUG_ON(mul <= 0); + } + + dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul); + + BUG_ON(bind_cpu_0 < 0 || bind_cpu_0 >= g->p.nr_cpus); + BUG_ON(bind_cpu_1 < 0 || bind_cpu_1 >= g->p.nr_cpus); + BUG_ON(bind_cpu_0 > bind_cpu_1); + + for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) { + int i; + + for (i = 0; i < mul; i++) { + int cpu; + + if (t >= g->p.nr_tasks) { + printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu); + goto out; + } + td = g->threads + t; + + if (t) + tprintf(","); + if (bind_len > 1) { + tprintf("%2d/%d", bind_cpu, bind_len); + } else { + tprintf("%2d", bind_cpu); + } + + CPU_ZERO(&td->bind_cpumask); + for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) { + BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus); + CPU_SET(cpu, &td->bind_cpumask); + } + t++; + } + } + } +out: + + tprintf("\n"); + + if (t < g->p.nr_tasks) + printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t); + + free(str0); +} + +static int parse_cpus_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (!arg) + return -1; + + return parse_cpu_list(arg); +} + +static int parse_node_list(const char *arg) +{ + p0.node_list_str = strdup(arg); + + dprintf("got NODE list: {%s}\n", p0.node_list_str); + + return 0; +} + +static void parse_setup_node_list(void) +{ + struct thread_data *td; + char *str0, *str; + int t; + + if (!g->p.node_list_str) + return; + + dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); + + str0 = str = strdup(g->p.node_list_str); + t = 0; + + BUG_ON(!str); + + tprintf("# binding tasks to NODEs:\n"); + tprintf("# "); + + while (true) { + int bind_node, bind_node_0, bind_node_1; + char *tok, *tok_end, *tok_step, *tok_mul; + int step; + int mul; + + tok = strsep(&str, ","); + if (!tok) + break; + + tok_end = strstr(tok, "-"); + + dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end); + if (!tok_end) { + /* Single NODE specified: */ + bind_node_0 = bind_node_1 = atol(tok); + } else { + /* NODE range specified (for example: "5-11"): */ + bind_node_0 = atol(tok); + bind_node_1 = atol(tok_end + 1); + } + + step = 1; + tok_step = strstr(tok, "#"); + if (tok_step) { + step = atol(tok_step + 1); + BUG_ON(step <= 0 || step >= g->p.nr_nodes); + } + + /* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */ + mul = 1; + tok_mul = strstr(tok, "x"); + if (tok_mul) { + mul = atol(tok_mul + 1); + BUG_ON(mul <= 0); + } + + dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step); + + BUG_ON(bind_node_0 < 0 || bind_node_0 >= g->p.nr_nodes); + BUG_ON(bind_node_1 < 0 || bind_node_1 >= g->p.nr_nodes); + BUG_ON(bind_node_0 > bind_node_1); + + for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) { + int i; + + for (i = 0; i < mul; i++) { + if 
(t >= g->p.nr_tasks) { + printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); + goto out; + } + td = g->threads + t; + + if (!t) + tprintf(" %2d", bind_node); + else + tprintf(",%2d", bind_node); + + td->bind_node = bind_node; + t++; + } + } + } +out: + + tprintf("\n"); + + if (t < g->p.nr_tasks) + printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t); + + free(str0); +} + +static int parse_nodes_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (!arg) + return -1; + + return parse_node_list(arg); + + return 0; +} + +#define BIT(x) (1ul << x) + +static inline uint32_t lfsr_32(uint32_t lfsr) +{ + const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31); + return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps); +} + +/* + * Make sure there's real data dependency to RAM (when read + * accesses are enabled), so the compiler, the CPU and the + * kernel (KSM, zero page, etc.) cannot optimize away RAM + * accesses: + */ +static inline u64 access_data(u64 *data __attribute__((unused)), u64 val) +{ + if (g->p.data_reads) + val += *data; + if (g->p.data_writes) + *data = val + 1; + return val; +} + +/* + * The worker process does two types of work, a forwards going + * loop and a backwards going loop. + * + * We do this so that on multiprocessor systems we do not create + * a 'train' of processing, with highly synchronized processes, + * skewing the whole benchmark. + */ +static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val) +{ + long words = bytes/sizeof(u64); + u64 *data = (void *)__data; + long chunk_0, chunk_1; + u64 *d0, *d, *d1; + long off; + long i; + + BUG_ON(!data && words); + BUG_ON(data && !words); + + if (!data) + return val; + + /* Very simple memset() work variant: */ + if (g->p.data_zero_memset && !g->p.data_rand_walk) { + bzero(data, bytes); + return val; + } + + /* Spread out by PID/TID nr and by loop nr: */ + chunk_0 = words/nr_max; + chunk_1 = words/g->p.nr_loops; + off = nr*chunk_0 + loop*chunk_1; + + while (off >= words) + off -= words; + + if (g->p.data_rand_walk) { + u32 lfsr = nr + loop + val; + int j; + + for (i = 0; i < words/1024; i++) { + long start, end; + + lfsr = lfsr_32(lfsr); + + start = lfsr % words; + end = min(start + 1024, words-1); + + if (g->p.data_zero_memset) { + bzero(data + start, (end-start) * sizeof(u64)); + } else { + for (j = start; j < end; j++) + val = access_data(data + j, val); + } + } + } else if (!g->p.data_backwards || (nr + loop) & 1) { + + d0 = data + off; + d = data + off + 1; + d1 = data + words; + + /* Process data forwards: */ + for (;;) { + if (unlikely(d >= d1)) + d = data; + if (unlikely(d == d0)) + break; + + val = access_data(d, val); + + d++; + } + } else { + /* Process data backwards: */ + + d0 = data + off; + d = data + off - 1; + d1 = data + words; + + /* Process data forwards: */ + for (;;) { + if (unlikely(d < data)) + d = data + words-1; + if (unlikely(d == d0)) + break; + + val = access_data(d, val); + + d--; + } + } + + return val; +} + +static void update_curr_cpu(int task_nr, unsigned long bytes_worked) +{ + unsigned int cpu; + + cpu = sched_getcpu(); + + g->threads[task_nr].curr_cpu = cpu; + prctl(0, bytes_worked); +} + +#define MAX_NR_NODES 64 + +/* + * Count the number of nodes a process's threads + * are spread out on. + * + * A count of 1 means that the process is compressed + * to a single node. A count of g->p.nr_nodes means it's + * spread out on the whole system. 
+ */ +static int count_process_nodes(int process_nr) +{ + char node_present[MAX_NR_NODES] = { 0, }; + int nodes; + int n, t; + + for (t = 0; t < g->p.nr_threads; t++) { + struct thread_data *td; + int task_nr; + int node; + + task_nr = process_nr*g->p.nr_threads + t; + td = g->threads + task_nr; + + node = numa_node_of_cpu(td->curr_cpu); + node_present[node] = 1; + } + + nodes = 0; + + for (n = 0; n < MAX_NR_NODES; n++) + nodes += node_present[n]; + + return nodes; +} + +/* + * Count the number of distinct process-threads a node contains. + * + * A count of 1 means that the node contains only a single + * process. If all nodes on the system contain at most one + * process then we are well-converged. + */ +static int count_node_processes(int node) +{ + int processes = 0; + int t, p; + + for (p = 0; p < g->p.nr_proc; p++) { + for (t = 0; t < g->p.nr_threads; t++) { + struct thread_data *td; + int task_nr; + int n; + + task_nr = p*g->p.nr_threads + t; + td = g->threads + task_nr; + + n = numa_node_of_cpu(td->curr_cpu); + if (n == node) { + processes++; + break; + } + } + } + + return processes; +} + +static void calc_convergence_compression(int *strong) +{ + unsigned int nodes_min, nodes_max; + int p; + + nodes_min = -1; + nodes_max = 0; + + for (p = 0; p < g->p.nr_proc; p++) { + unsigned int nodes = count_process_nodes(p); + + nodes_min = min(nodes, nodes_min); + nodes_max = max(nodes, nodes_max); + } + + /* Strong convergence: all threads compress on a single node: */ + if (nodes_min == 1 && nodes_max == 1) { + *strong = 1; + } else { + *strong = 0; + tprintf(" {%d-%d}", nodes_min, nodes_max); + } +} + +static void calc_convergence(double runtime_ns_max, double *convergence) +{ + unsigned int loops_done_min, loops_done_max; + int process_groups; + int nodes[MAX_NR_NODES]; + int distance; + int nr_min; + int nr_max; + int strong; + int sum; + int nr; + int node; + int cpu; + int t; + + if (!g->p.show_convergence && !g->p.measure_convergence) + return; + + for (node = 0; node < g->p.nr_nodes; node++) + nodes[node] = 0; + + loops_done_min = -1; + loops_done_max = 0; + + for (t = 0; t < g->p.nr_tasks; t++) { + struct thread_data *td = g->threads + t; + unsigned int loops_done; + + cpu = td->curr_cpu; + + /* Not all threads have written it yet: */ + if (cpu < 0) + continue; + + node = numa_node_of_cpu(cpu); + + nodes[node]++; + + loops_done = td->loops_done; + loops_done_min = min(loops_done, loops_done_min); + loops_done_max = max(loops_done, loops_done_max); + } + + nr_max = 0; + nr_min = g->p.nr_tasks; + sum = 0; + + for (node = 0; node < g->p.nr_nodes; node++) { + nr = nodes[node]; + nr_min = min(nr, nr_min); + nr_max = max(nr, nr_max); + sum += nr; + } + BUG_ON(nr_min > nr_max); + + BUG_ON(sum > g->p.nr_tasks); + + if (0 && (sum < g->p.nr_tasks)) + return; + + /* + * Count the number of distinct process groups present + * on nodes - when we are converged this will decrease + * to g->p.nr_proc: + */ + process_groups = 0; + + for (node = 0; node < g->p.nr_nodes; node++) { + int processes = count_node_processes(node); + + nr = nodes[node]; + tprintf(" %2d/%-2d", nr, processes); + + process_groups += processes; + } + + distance = nr_max - nr_min; + + tprintf(" [%2d/%-2d]", distance, process_groups); + + tprintf(" l:%3d-%-3d (%3d)", + loops_done_min, loops_done_max, loops_done_max-loops_done_min); + + if (loops_done_min && loops_done_max) { + double skew = 1.0 - (double)loops_done_min/loops_done_max; + + tprintf(" [%4.1f%%]", skew * 100.0); + } + + calc_convergence_compression(&strong); + + if 
(strong && process_groups == g->p.nr_proc) { + if (!*convergence) { + *convergence = runtime_ns_max; + tprintf(" (%6.1fs converged)\n", *convergence/1e9); + if (g->p.measure_convergence) { + g->all_converged = true; + g->stop_work = true; + } + } + } else { + if (*convergence) { + tprintf(" (%6.1fs de-converged)", runtime_ns_max/1e9); + *convergence = 0; + } + tprintf("\n"); + } +} + +static void show_summary(double runtime_ns_max, int l, double *convergence) +{ + tprintf("\r # %5.1f%% [%.1f mins]", + (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max/1e9 / 60.0); + + calc_convergence(runtime_ns_max, convergence); + + if (g->p.show_details >= 0) + fflush(stdout); +} + +static void *worker_thread(void *__tdata) +{ + struct thread_data *td = __tdata; + struct timeval start0, start, stop, diff; + int process_nr = td->process_nr; + int thread_nr = td->thread_nr; + unsigned long last_perturbance; + int task_nr = td->task_nr; + int details = g->p.show_details; + int first_task, last_task; + double convergence = 0; + u64 val = td->val; + double runtime_ns_max; + u8 *global_data; + u8 *process_data; + u8 *thread_data; + u64 bytes_done; + long work_done; + u32 l; + + bind_to_cpumask(td->bind_cpumask); + bind_to_memnode(td->bind_node); + + set_taskname("thread %d/%d", process_nr, thread_nr); + + global_data = g->data; + process_data = td->process_data; + thread_data = setup_private_data(g->p.bytes_thread); + + bytes_done = 0; + + last_task = 0; + if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1) + last_task = 1; + + first_task = 0; + if (process_nr == 0 && thread_nr == 0) + first_task = 1; + + if (details >= 2) { + printf("# thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n", + process_nr, thread_nr, global_data, process_data, thread_data); + } + + if (g->p.serialize_startup) { + pthread_mutex_lock(&g->startup_mutex); + g->nr_tasks_started++; + pthread_mutex_unlock(&g->startup_mutex); + + /* Here we will wait for the main process to start us all at once: */ + pthread_mutex_lock(&g->start_work_mutex); + g->nr_tasks_working++; + + /* Last one wake the main process: */ + if (g->nr_tasks_working == g->p.nr_tasks) + pthread_mutex_unlock(&g->startup_done_mutex); + + pthread_mutex_unlock(&g->start_work_mutex); + } + + gettimeofday(&start0, NULL); + + start = stop = start0; + last_perturbance = start.tv_sec; + + for (l = 0; l < g->p.nr_loops; l++) { + start = stop; + + if (g->stop_work) + break; + + val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val); + val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val); + val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val); + + if (g->p.sleep_usecs) { + pthread_mutex_lock(td->process_lock); + usleep(g->p.sleep_usecs); + pthread_mutex_unlock(td->process_lock); + } + /* + * Amount of work to be done under a process-global lock: + */ + if (g->p.bytes_process_locked) { + pthread_mutex_lock(td->process_lock); + val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val); + pthread_mutex_unlock(td->process_lock); + } + + work_done = g->p.bytes_global + g->p.bytes_process + + g->p.bytes_process_locked + g->p.bytes_thread; + + update_curr_cpu(task_nr, work_done); + bytes_done += work_done; + + if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs) + continue; + + td->loops_done = l; + + gettimeofday(&stop, NULL); + + /* Check whether our max runtime timed out: */ + if (g->p.nr_secs) { + timersub(&stop, &start0, 
&diff); + if (diff.tv_sec >= g->p.nr_secs) { + g->stop_work = true; + break; + } + } + + /* Update the summary at most once per second: */ + if (start.tv_sec == stop.tv_sec) + continue; + + /* + * Perturb the first task's equilibrium every g->p.perturb_secs seconds, + * by migrating to CPU#0: + */ + if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) { + cpu_set_t orig_mask; + int target_cpu; + int this_cpu; + + last_perturbance = stop.tv_sec; + + /* + * Depending on where we are running, move into + * the other half of the system, to create some + * real disturbance: + */ + this_cpu = g->threads[task_nr].curr_cpu; + if (this_cpu < g->p.nr_cpus/2) + target_cpu = g->p.nr_cpus-1; + else + target_cpu = 0; + + orig_mask = bind_to_cpu(target_cpu); + + /* Here we are running on the target CPU already */ + if (details >= 1) + printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu); + + bind_to_cpumask(orig_mask); + } + + if (details >= 3) { + timersub(&stop, &start, &diff); + runtime_ns_max = diff.tv_sec * 1000000000; + runtime_ns_max += diff.tv_usec * 1000; + + if (details >= 0) { + printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016lx]\n", + process_nr, thread_nr, runtime_ns_max / bytes_done, val); + } + fflush(stdout); + } + if (!last_task) + continue; + + timersub(&stop, &start0, &diff); + runtime_ns_max = diff.tv_sec * 1000000000ULL; + runtime_ns_max += diff.tv_usec * 1000ULL; + + show_summary(runtime_ns_max, l, &convergence); + } + + gettimeofday(&stop, NULL); + timersub(&stop, &start0, &diff); + td->runtime_ns = diff.tv_sec * 1000000000ULL; + td->runtime_ns += diff.tv_usec * 1000ULL; + + free_data(thread_data, g->p.bytes_thread); + + pthread_mutex_lock(&g->stop_work_mutex); + g->bytes_done += bytes_done; + pthread_mutex_unlock(&g->stop_work_mutex); + + return NULL; +} + +/* + * A worker process starts a couple of threads: + */ +static void worker_process(int process_nr) +{ + pthread_mutex_t process_lock; + struct thread_data *td; + pthread_t *pthreads; + u8 *process_data; + int task_nr; + int ret; + int t; + + pthread_mutex_init(&process_lock, NULL); + set_taskname("process %d", process_nr); + + /* + * Pick up the memory policy and the CPU binding of our first thread, + * so that we initialize memory accordingly: + */ + task_nr = process_nr*g->p.nr_threads; + td = g->threads + task_nr; + + bind_to_memnode(td->bind_node); + bind_to_cpumask(td->bind_cpumask); + + pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t)); + process_data = setup_private_data(g->p.bytes_process); + + if (g->p.show_details >= 3) { + printf(" # process %2d global mem: %p, process mem: %p\n", + process_nr, g->data, process_data); + } + + for (t = 0; t < g->p.nr_threads; t++) { + task_nr = process_nr*g->p.nr_threads + t; + td = g->threads + task_nr; + + td->process_data = process_data; + td->process_nr = process_nr; + td->thread_nr = t; + td->task_nr = task_nr; + td->val = rand(); + td->curr_cpu = -1; + td->process_lock = &process_lock; + + ret = pthread_create(pthreads + t, NULL, worker_thread, td); + BUG_ON(ret); + } + + for (t = 0; t < g->p.nr_threads; t++) { + ret = pthread_join(pthreads[t], NULL); + BUG_ON(ret); + } + + free_data(process_data, g->p.bytes_process); + free(pthreads); +} + +static void print_summary(void) +{ + if (g->p.show_details < 0) + return; + + printf("\n ###\n"); + printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", + g->p.nr_tasks, g->p.nr_tasks == 1 ? 
"task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); + printf(" # %5dx %5ldMB global shared mem operations\n", + g->p.nr_loops, g->p.bytes_global/1024/1024); + printf(" # %5dx %5ldMB process shared mem operations\n", + g->p.nr_loops, g->p.bytes_process/1024/1024); + printf(" # %5dx %5ldMB thread local mem operations\n", + g->p.nr_loops, g->p.bytes_thread/1024/1024); + + printf(" ###\n"); + + printf("\n ###\n"); fflush(stdout); +} + +static void init_thread_data(void) +{ + ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; + int t; + + g->threads = zalloc_shared_data(size); + + for (t = 0; t < g->p.nr_tasks; t++) { + struct thread_data *td = g->threads + t; + int cpu; + + /* Allow all nodes by default: */ + td->bind_node = -1; + + /* Allow all CPUs by default: */ + CPU_ZERO(&td->bind_cpumask); + for (cpu = 0; cpu < g->p.nr_cpus; cpu++) + CPU_SET(cpu, &td->bind_cpumask); + } +} + +static void deinit_thread_data(void) +{ + ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; + + free_data(g->threads, size); +} + +static int init(void) +{ + g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0); + + /* Copy over options: */ + g->p = p0; + + g->p.nr_cpus = numa_num_configured_cpus(); + + g->p.nr_nodes = numa_max_node() + 1; + + /* char array in count_process_nodes(): */ + BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0); + + if (g->p.show_quiet && !g->p.show_details) + g->p.show_details = -1; + + /* Some memory should be specified: */ + if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str) + return -1; + + if (g->p.mb_global_str) { + g->p.mb_global = atof(g->p.mb_global_str); + BUG_ON(g->p.mb_global < 0); + } + + if (g->p.mb_proc_str) { + g->p.mb_proc = atof(g->p.mb_proc_str); + BUG_ON(g->p.mb_proc < 0); + } + + if (g->p.mb_proc_locked_str) { + g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str); + BUG_ON(g->p.mb_proc_locked < 0); + BUG_ON(g->p.mb_proc_locked > g->p.mb_proc); + } + + if (g->p.mb_thread_str) { + g->p.mb_thread = atof(g->p.mb_thread_str); + BUG_ON(g->p.mb_thread < 0); + } + + BUG_ON(g->p.nr_threads <= 0); + BUG_ON(g->p.nr_proc <= 0); + + g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads; + + g->p.bytes_global = g->p.mb_global *1024L*1024L; + g->p.bytes_process = g->p.mb_proc *1024L*1024L; + g->p.bytes_process_locked = g->p.mb_proc_locked *1024L*1024L; + g->p.bytes_thread = g->p.mb_thread *1024L*1024L; + + g->data = setup_shared_data(g->p.bytes_global); + + /* Startup serialization: */ + init_global_mutex(&g->start_work_mutex); + init_global_mutex(&g->startup_mutex); + init_global_mutex(&g->startup_done_mutex); + init_global_mutex(&g->stop_work_mutex); + + init_thread_data(); + + tprintf("#\n"); + parse_setup_cpu_list(); + parse_setup_node_list(); + tprintf("#\n"); + + print_summary(); + + return 0; +} + +static void deinit(void) +{ + free_data(g->data, g->p.bytes_global); + g->data = NULL; + + deinit_thread_data(); + + free_data(g, sizeof(*g)); + g = NULL; +} + +/* + * Print a short or long result, depending on the verbosity setting: + */ +static void print_res(const char *name, double val, + const char *txt_unit, const char *txt_short, const char *txt_long) +{ + if (!name) + name = "main,"; + + if (g->p.show_quiet) + printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); + else + printf(" %14.3f %s\n", val, txt_long); +} + +static int __bench_numa(const char *name) +{ + struct timeval start, stop, diff; + u64 runtime_ns_min, runtime_ns_sum; + pid_t *pids, pid, wpid; + double delta_runtime; + double runtime_avg; + double runtime_sec_max; + 
double runtime_sec_min; + int wait_stat; + double bytes; + int i, t; + + if (init()) + return -1; + + pids = zalloc(g->p.nr_proc * sizeof(*pids)); + pid = -1; + + /* All threads try to acquire it, this way we can wait for them to start up: */ + pthread_mutex_lock(&g->start_work_mutex); + + if (g->p.serialize_startup) { + tprintf(" #\n"); + tprintf(" # Startup synchronization: ..."); fflush(stdout); + } + + gettimeofday(&start, NULL); + + for (i = 0; i < g->p.nr_proc; i++) { + pid = fork(); + dprintf(" # process %2d: PID %d\n", i, pid); + + BUG_ON(pid < 0); + if (!pid) { + /* Child process: */ + worker_process(i); + + exit(0); + } + pids[i] = pid; + + } + /* Wait for all the threads to start up: */ + while (g->nr_tasks_started != g->p.nr_tasks) + usleep(1000); + + BUG_ON(g->nr_tasks_started != g->p.nr_tasks); + + if (g->p.serialize_startup) { + double startup_sec; + + pthread_mutex_lock(&g->startup_done_mutex); + + /* This will start all threads: */ + pthread_mutex_unlock(&g->start_work_mutex); + + /* This mutex is locked - the last started thread will wake us: */ + pthread_mutex_lock(&g->startup_done_mutex); + + gettimeofday(&stop, NULL); + + timersub(&stop, &start, &diff); + + startup_sec = diff.tv_sec * 1000000000.0; + startup_sec += diff.tv_usec * 1000.0; + startup_sec /= 1e9; + + tprintf(" threads initialized in %.6f seconds.\n", startup_sec); + tprintf(" #\n"); + + start = stop; + pthread_mutex_unlock(&g->startup_done_mutex); + } else { + gettimeofday(&start, NULL); + } + + /* Parent process: */ + + + for (i = 0; i < g->p.nr_proc; i++) { + wpid = waitpid(pids[i], &wait_stat, 0); + BUG_ON(wpid < 0); + BUG_ON(!WIFEXITED(wait_stat)); + + } + + runtime_ns_sum = 0; + runtime_ns_min = -1LL; + + for (t = 0; t < g->p.nr_tasks; t++) { + u64 thread_runtime_ns = g->threads[t].runtime_ns; + + runtime_ns_sum += thread_runtime_ns; + runtime_ns_min = min(thread_runtime_ns, runtime_ns_min); + } + + gettimeofday(&stop, NULL); + timersub(&stop, &start, &diff); + + BUG_ON(bench_format != BENCH_FORMAT_DEFAULT); + + tprintf("\n ###\n"); + tprintf("\n"); + + runtime_sec_max = diff.tv_sec * 1000000000.0; + runtime_sec_max += diff.tv_usec * 1000.0; + runtime_sec_max /= 1e9; + + runtime_sec_min = runtime_ns_min/1e9; + + bytes = g->bytes_done; + runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / 1e9; + + if (g->p.measure_convergence) { + print_res(name, runtime_sec_max, + "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge"); + } + + print_res(name, runtime_sec_max, + "secs,", "runtime-max/thread", "secs slowest (max) thread-runtime"); + + print_res(name, runtime_sec_min, + "secs,", "runtime-min/thread", "secs fastest (min) thread-runtime"); + + print_res(name, runtime_avg, + "secs,", "runtime-avg/thread", "secs average thread-runtime"); + + delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0; + print_res(name, delta_runtime / runtime_sec_max * 100.0, + "%,", "spread-runtime/thread", "% difference between max/avg runtime"); + + print_res(name, bytes / g->p.nr_tasks / 1e9, + "GB,", "data/thread", "GB data processed, per thread"); + + print_res(name, bytes / 1e9, + "GB,", "data-total", "GB data processed, total"); + + print_res(name, runtime_sec_max * 1e9 / (bytes / g->p.nr_tasks), + "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime"); + + print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max, + "GB/sec,", "thread-speed", "GB/sec/thread speed"); + + print_res(name, bytes / runtime_sec_max / 1e9, + "GB/sec,", "total-speed", "GB/sec total speed"); + + free(pids); + + 
deinit(); + + return 0; +} + +#define MAX_ARGS 50 + +static int command_size(const char **argv) +{ + int size = 0; + + while (*argv) { + size++; + argv++; + } + + BUG_ON(size >= MAX_ARGS); + + return size; +} + +static void init_params(struct params *p, const char *name, int argc, const char **argv) +{ + int i; + + printf("\n # Running %s \"perf bench numa", name); + + for (i = 0; i < argc; i++) + printf(" %s", argv[i]); + + printf("\"\n"); + + memset(p, 0, sizeof(*p)); + + /* Initialize nonzero defaults: */ + + p->serialize_startup = 1; + p->data_reads = true; + p->data_writes = true; + p->data_backwards = true; + p->data_rand_walk = true; + p->nr_loops = -1; + p->init_random = true; +} + +static int run_bench_numa(const char *name, const char **argv) +{ + int argc = command_size(argv); + + init_params(&p0, name, argc, argv); + argc = parse_options(argc, argv, options, bench_numa_usage, 0); + if (argc) + goto err; + + if (__bench_numa(name)) + goto err; + + return 0; + +err: + usage_with_options(numa_usage, options); + return -1; +} + +#define OPT_BW_RAM "-s", "20", "-zZq", "--thp", " 1", "--no-data_rand_walk" +#define OPT_BW_RAM_NOTHP OPT_BW_RAM, "--thp", "-1" + +#define OPT_CONV "-s", "100", "-zZ0qcm", "--thp", " 1" +#define OPT_CONV_NOTHP OPT_CONV, "--thp", "-1" + +#define OPT_BW "-s", "20", "-zZ0q", "--thp", " 1" +#define OPT_BW_NOTHP OPT_BW, "--thp", "-1" + +/* + * The built-in test-suite executed by "perf bench numa -a". + * + * (A minimum of 4 nodes and 16 GB of RAM is recommended.) + */ +static const char *tests[][MAX_ARGS] = { + /* Basic single-stream NUMA bandwidth measurements: */ + { "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024", + "-C" , "0", "-M", "0", OPT_BW_RAM }, + { "RAM-bw-local-NOTHP,", + "mem", "-p", "1", "-t", "1", "-P", "1024", + "-C" , "0", "-M", "0", OPT_BW_RAM_NOTHP }, + { "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024", + "-C" , "0", "-M", "1", OPT_BW_RAM }, + + /* 2-stream NUMA bandwidth measurements: */ + { "RAM-bw-local-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024", + "-C", "0,2", "-M", "0x2", OPT_BW_RAM }, + { "RAM-bw-remote-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024", + "-C", "0,2", "-M", "1x2", OPT_BW_RAM }, + + /* Cross-stream NUMA bandwidth measurement: */ + { "RAM-bw-cross,", "mem", "-p", "2", "-t", "1", "-P", "1024", + "-C", "0,8", "-M", "1,0", OPT_BW_RAM }, + + /* Convergence latency measurements: */ + { " 1x3-convergence,", "mem", "-p", "1", "-t", "3", "-P", "512", OPT_CONV }, + { " 1x4-convergence,", "mem", "-p", "1", "-t", "4", "-P", "512", OPT_CONV }, + { " 1x6-convergence,", "mem", "-p", "1", "-t", "6", "-P", "1020", OPT_CONV }, + { " 2x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV }, + { " 3x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV }, + { " 4x4-convergence,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV }, + { " 4x4-convergence-NOTHP,", + "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV_NOTHP }, + { " 4x6-convergence,", "mem", "-p", "4", "-t", "6", "-P", "1020", OPT_CONV }, + { " 4x8-convergence,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_CONV }, + { " 8x4-convergence,", "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV }, + { " 8x4-convergence-NOTHP,", + "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV_NOTHP }, + { " 3x1-convergence,", "mem", "-p", "3", "-t", "1", "-P", "512", OPT_CONV }, + { " 4x1-convergence,", "mem", "-p", "4", "-t", "1", "-P", "512", OPT_CONV }, + { " 8x1-convergence,", "mem", "-p", "8", "-t", "1", "-P", "512", OPT_CONV }, + { 
"16x1-convergence,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_CONV }, + { "32x1-convergence,", "mem", "-p", "32", "-t", "1", "-P", "128", OPT_CONV }, + + /* Various NUMA process/thread layout bandwidth measurements: */ + { " 2x1-bw-process,", "mem", "-p", "2", "-t", "1", "-P", "1024", OPT_BW }, + { " 3x1-bw-process,", "mem", "-p", "3", "-t", "1", "-P", "1024", OPT_BW }, + { " 4x1-bw-process,", "mem", "-p", "4", "-t", "1", "-P", "1024", OPT_BW }, + { " 8x1-bw-process,", "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW }, + { " 8x1-bw-process-NOTHP,", + "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW_NOTHP }, + { "16x1-bw-process,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_BW }, + + { " 4x1-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW }, + { " 8x1-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW }, + { "16x1-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW }, + { "32x1-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW }, + + { " 2x3-bw-thread,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW }, + { " 4x4-bw-thread,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW }, + { " 4x6-bw-thread,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW }, + { " 4x8-bw-thread,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW }, + { " 4x8-bw-thread-NOTHP,", + "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW_NOTHP }, + { " 3x3-bw-thread,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW }, + { " 5x5-bw-thread,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW }, + + { "2x16-bw-thread,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW }, + { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW }, + + { "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW }, + { "numa02-bw-NOTHP,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW_NOTHP }, + { "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW }, + { "numa01-bw-thread-NOTHP,", + "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW_NOTHP }, +}; + +static int bench_all(void) +{ + int nr = ARRAY_SIZE(tests); + int ret; + int i; + + ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'"); + BUG_ON(ret < 0); + + for (i = 0; i < nr; i++) { + if (run_bench_numa(tests[i][0], tests[i] + 1)) + return -1; + } + + printf("\n"); + + return 0; +} + +int bench_numa(int argc, const char **argv, const char *prefix __maybe_unused) +{ + init_params(&p0, "main,", argc, argv); + argc = parse_options(argc, argv, options, bench_numa_usage, 0); + if (argc) + goto err; + + if (p0.run_all) + return bench_all(); + + if (__bench_numa(NULL)) + goto err; + + return 0; + +err: + usage_with_options(numa_usage, options); + return -1; +} diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index afd1255a632f..e5d514bf5365 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -35,6 +35,16 @@ struct bench_suite { /* sentinel: easy for help */ #define suite_all { "all", "Test all benchmark suites", NULL } +static struct bench_suite numa_suites[] = { + { "mem", + "Benchmark for NUMA workloads", + bench_numa }, + suite_all, + { NULL, + NULL, + NULL } +}; + static struct bench_suite sched_suites[] = { { "messaging", "Benchmark for scheduler and IPC mechanisms", @@ -68,6 +78,9 @@ struct bench_subsys { }; static struct bench_subsys subsystems[] = { + { "numa", + "NUMA scheduling and MM behavior", + numa_suites }, { "sched", "scheduler and IPC mechanism", sched_suites }, -- cgit v1.2.3-59-g8ed1b From 
79d824e31692d165f6c7d92bf4d1af0b9d969d76 Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Sun, 27 Jan 2013 20:51:22 -0500 Subject: perf tools: Make numa benchmark optional Commit "perf: Add 'perf bench numa mem'..." added a NUMA performance benchmark to perf. Make this optional and test for required dependencies. Signed-off-by: Peter Hurley Acked-by: Ingo Molnar Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359337882-21821-1-git-send-email-peter@hurleysoftware.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 16 ++++++++++++++-- tools/perf/builtin-bench.c | 4 ++++ tools/perf/config/feature-tests.mak | 11 +++++++++++ 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b62dbc0d974a..4b1044cbd84c 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -47,6 +47,8 @@ include config/utilities.mak # backtrace post unwind. # # Define NO_BACKTRACE if you do not want stack backtrace debug feature +# +# Define NO_LIBNUMA if you do not want numa perf benchmark $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) @@ -103,7 +105,7 @@ ifdef PARSER_DEBUG endif CFLAGS = -fno-omit-frame-pointer -ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) -EXTLIBS = -lpthread -lrt -lelf -lm -lnuma +EXTLIBS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE ALL_LDFLAGS = $(LDFLAGS) STRIP ?= strip @@ -492,7 +494,6 @@ LIB_OBJS += $(OUTPUT)tests/python-use.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o # Benchmark modules -BUILTIN_OBJS += $(OUTPUT)bench/numa.o BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o ifeq ($(RAW_ARCH),x86_64) @@ -839,6 +840,17 @@ ifndef NO_BACKTRACE endif endif +ifndef NO_LIBNUMA + FLAGS_LIBNUMA = $(ALL_CFLAGS) $(ALL_LDFLAGS) -lnuma + ifneq ($(call try-cc,$(SOURCE_LIBNUMA),$(FLAGS_LIBNUMA),libnuma),y) + msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev); + else + BASIC_CFLAGS += -DLIBNUMA_SUPPORT + BUILTIN_OBJS += $(OUTPUT)bench/numa.o + EXTLIBS += -lnuma + endif +endif + ifdef ASCIIDOC8 export ASCIIDOC8 endif diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index e5d514bf5365..77298bf892b8 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -35,6 +35,7 @@ struct bench_suite { /* sentinel: easy for help */ #define suite_all { "all", "Test all benchmark suites", NULL } +#ifdef LIBNUMA_SUPPORT static struct bench_suite numa_suites[] = { { "mem", "Benchmark for NUMA workloads", @@ -44,6 +45,7 @@ static struct bench_suite numa_suites[] = { NULL, NULL } }; +#endif static struct bench_suite sched_suites[] = { { "messaging", @@ -78,9 +80,11 @@ struct bench_subsys { }; static struct bench_subsys subsystems[] = { +#ifdef LIBNUMA_SUPPORT { "numa", "NUMA scheduling and MM behavior", numa_suites }, +#endif { "sched", "scheduler and IPC mechanism", sched_suites }, diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak index f5ac77485a4f..b4eabb44e381 100644 --- a/tools/perf/config/feature-tests.mak +++ b/tools/perf/config/feature-tests.mak @@ -225,3 +225,14 @@ int main(void) return on_exit(NULL, NULL); } endef + +define SOURCE_LIBNUMA +#include +#include + +int main(void) +{ + 
numa_available(); + return 0; +} +endef \ No newline at end of file -- cgit v1.2.3-59-g8ed1b From c7a79c47c683de6979a3e1a96dc723b0606c07ca Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 29 Jan 2013 12:47:43 +0100 Subject: perf evsel: Add prev_raw_count field This field will be used by commands which print counter deltas on regular timer intervals, such as perf stat -I. Signed-off-by: Stephane Eranian Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359460064-3060-2-git-send-email-eranian@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 26 ++++++++++++++++++++++++++ tools/perf/util/evsel.h | 1 + 2 files changed, 27 insertions(+) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e45332d08a58..dbdcca43cac6 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -659,6 +659,28 @@ void perf_evsel__delete(struct perf_evsel *evsel) free(evsel); } +static inline void compute_deltas(struct perf_evsel *evsel, + int cpu, + struct perf_counts_values *count) +{ + struct perf_counts_values tmp; + + if (!evsel->prev_raw_counts) + return; + + if (cpu == -1) { + tmp = evsel->prev_raw_counts->aggr; + evsel->prev_raw_counts->aggr = *count; + } else { + tmp = evsel->prev_raw_counts->cpu[cpu]; + evsel->prev_raw_counts->cpu[cpu] = *count; + } + + count->val = count->val - tmp.val; + count->ena = count->ena - tmp.ena; + count->run = count->run - tmp.run; +} + int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread, bool scale) { @@ -674,6 +696,8 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) return -errno; + compute_deltas(evsel, cpu, &count); + if (scale) { if (count.run == 0) count.val = 0; @@ -712,6 +736,8 @@ int __perf_evsel__read(struct perf_evsel *evsel, } } + compute_deltas(evsel, -1, aggr); + evsel->counts->scaled = 0; if (scale) { if (aggr->run == 0) { diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index c68d1b82e843..3a4cd60044ea 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -53,6 +53,7 @@ struct perf_evsel { struct xyarray *sample_id; u64 *id; struct perf_counts *counts; + struct perf_counts *prev_raw_counts; int idx; u32 ids; struct hists hists; -- cgit v1.2.3-59-g8ed1b From 13370a9b5bb88f7aa90e5be68972d95096b20a6d Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 29 Jan 2013 12:47:44 +0100 Subject: perf stat: Add interval printing This patch adds a new printing mode for perf stat. It allows interval printing. That means perf stat can now print event deltas at regular time intervals. This is useful to detect phases in programs. The -I option enables interval printing. It expects an interval duration in milliseconds. Minimum is 100ms. Once activated, perf stat prints event deltas since the last printout. All modes are supported. 
$ perf stat -I 1000 -e cycles noploop 10 noploop for 10 seconds # time counts events 1.000109853 2,388,560,546 cycles 2.000262846 2,393,332,358 cycles 3.000354131 2,393,176,537 cycles 4.000439503 2,393,203,790 cycles 5.000527075 2,393,167,675 cycles 6.000609052 2,393,203,670 cycles 7.000691082 2,393,175,678 cycles The output format makes it easy to feed into a plotting program such as gnuplot when the -I option is used in combination with the -x option: $ perf stat -x, -I 1000 -e cycles noploop 10 noploop for 10 seconds 1.000084113,2378775498,cycles 2.000245798,2391056897,cycles 3.000354445,2392089414,cycles 4.000459115,2390936603,cycles 5.000565341,2392108173,cycles Signed-off-by: Stephane Eranian Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359460064-3060-3-git-send-email-eranian@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-stat.txt | 4 + tools/perf/builtin-stat.c | 157 +++++++++++++++++++++++++++++---- 2 files changed, 146 insertions(+), 15 deletions(-) diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index cf0c3107e06e..5289da3344e9 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -114,6 +114,10 @@ with it. --append may be used here. Examples: perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage +-I msecs:: +--interval-print msecs:: + print count deltas every N milliseconds (minimum: 100ms) + example: perf stat -I 1000 -e cycles -a sleep 5 EXAMPLES -------- diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 1c2ac148a7d5..493043abd164 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -65,6 +65,10 @@ #define CNTR_NOT_SUPPORTED "" #define CNTR_NOT_COUNTED "" +static void print_stat(int argc, const char **argv); +static void print_counter_aggr(struct perf_evsel *counter, char *prefix); +static void print_counter(struct perf_evsel *counter, char *prefix); + static struct perf_evlist *evsel_list; static struct perf_target target = { @@ -87,6 +91,8 @@ static FILE *output = NULL; static const char *pre_cmd = NULL; static const char *post_cmd = NULL; static bool sync_run = false; +static unsigned int interval = 0; +static struct timespec ref_time; static volatile int done = 0; @@ -94,6 +100,28 @@ struct perf_stat { struct stats res_stats[3]; }; +static inline void diff_timespec(struct timespec *r, struct timespec *a, + struct timespec *b) +{ + r->tv_sec = a->tv_sec - b->tv_sec; + if (a->tv_nsec < b->tv_nsec) { + r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec; + r->tv_sec--; + } else { + r->tv_nsec = a->tv_nsec - b->tv_nsec ; + } +} + +static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) +{ + return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus; +} + +static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) +{ + return perf_evsel__cpus(evsel)->nr; +} + static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) { evsel->priv = zalloc(sizeof(struct perf_stat)); @@ -106,14 +134,27 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) evsel->priv = NULL; } -static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) +static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel) { - return (evsel->cpus && !target.cpu_list) ? 
evsel->cpus : evsel_list->cpus; + void *addr; + size_t sz; + + sz = sizeof(*evsel->counts) + + (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values)); + + addr = zalloc(sz); + if (!addr) + return -ENOMEM; + + evsel->prev_raw_counts = addr; + + return 0; } -static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) +static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) { - return perf_evsel__cpus(evsel)->nr; + free(evsel->prev_raw_counts); + evsel->prev_raw_counts = NULL; } static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; @@ -245,16 +286,69 @@ static int read_counter(struct perf_evsel *counter) return 0; } +static void print_interval(void) +{ + static int num_print_interval; + struct perf_evsel *counter; + struct perf_stat *ps; + struct timespec ts, rs; + char prefix[64]; + + if (no_aggr) { + list_for_each_entry(counter, &evsel_list->entries, node) { + ps = counter->priv; + memset(ps->res_stats, 0, sizeof(ps->res_stats)); + read_counter(counter); + } + } else { + list_for_each_entry(counter, &evsel_list->entries, node) { + ps = counter->priv; + memset(ps->res_stats, 0, sizeof(ps->res_stats)); + read_counter_aggr(counter); + } + } + clock_gettime(CLOCK_MONOTONIC, &ts); + diff_timespec(&rs, &ts, &ref_time); + sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep); + + if (num_print_interval == 0 && !csv_output) { + if (no_aggr) + fprintf(output, "# time CPU counts events\n"); + else + fprintf(output, "# time counts events\n"); + } + + if (++num_print_interval == 25) + num_print_interval = 0; + + if (no_aggr) { + list_for_each_entry(counter, &evsel_list->entries, node) + print_counter(counter, prefix); + } else { + list_for_each_entry(counter, &evsel_list->entries, node) + print_counter_aggr(counter, prefix); + } +} + static int __run_perf_stat(int argc __maybe_unused, const char **argv) { char msg[512]; unsigned long long t0, t1; struct perf_evsel *counter; + struct timespec ts; int status = 0; int child_ready_pipe[2], go_pipe[2]; const bool forks = (argc > 0); char buf; + if (interval) { + ts.tv_sec = interval / 1000; + ts.tv_nsec = (interval % 1000) * 1000000; + } else { + ts.tv_sec = 1; + ts.tv_nsec = 0; + } + if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { perror("failed to create pipes"); return -1; @@ -347,14 +441,25 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv) * Enable counters and exec the command: */ t0 = rdclock(); + clock_gettime(CLOCK_MONOTONIC, &ref_time); if (forks) { close(go_pipe[1]); + if (interval) { + while (!waitpid(child_pid, &status, WNOHANG)) { + nanosleep(&ts, NULL); + print_interval(); + } + } wait(&status); if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { - while(!done) sleep(1); + while (!done) { + nanosleep(&ts, NULL); + if (interval) + print_interval(); + } } t1 = rdclock(); @@ -440,7 +545,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) if (evsel->cgrp) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); - if (csv_output) + if (csv_output || interval) return; if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) @@ -654,12 +759,11 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (evsel->cgrp) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); - if (csv_output) + if (csv_output || interval) return; if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { total = avg_stats(&runtime_cycles_stats[cpu]); - if (total) ratio = avg / total; @@ -753,12 +857,15 @@ static void abs_printout(int cpu, 
struct perf_evsel *evsel, double avg) * Print out the results of a single counter: * aggregated counts in system-wide mode */ -static void print_counter_aggr(struct perf_evsel *counter) +static void print_counter_aggr(struct perf_evsel *counter, char *prefix) { struct perf_stat *ps = counter->priv; double avg = avg_stats(&ps->res_stats[0]); int scaled = counter->counts->scaled; + if (prefix) + fprintf(output, "%s", prefix); + if (scaled == -1) { fprintf(output, "%*s%s%*s", csv_output ? 0 : 18, @@ -801,7 +908,7 @@ static void print_counter_aggr(struct perf_evsel *counter) * Print out the results of a single counter: * does not use aggregated count in system-wide */ -static void print_counter(struct perf_evsel *counter) +static void print_counter(struct perf_evsel *counter, char *prefix) { u64 ena, run, val; int cpu; @@ -810,6 +917,10 @@ static void print_counter(struct perf_evsel *counter) val = counter->counts->cpu[cpu].val; ena = counter->counts->cpu[cpu].ena; run = counter->counts->cpu[cpu].run; + + if (prefix) + fprintf(output, "%s", prefix); + if (run == 0 || ena == 0) { fprintf(output, "CPU%*d%s%*s%s%*s", csv_output ? 0 : -4, @@ -871,10 +982,10 @@ static void print_stat(int argc, const char **argv) if (no_aggr) { list_for_each_entry(counter, &evsel_list->entries, node) - print_counter(counter); + print_counter(counter, NULL); } else { list_for_each_entry(counter, &evsel_list->entries, node) - print_counter_aggr(counter); + print_counter_aggr(counter, NULL); } if (!csv_output) { @@ -895,7 +1006,7 @@ static volatile int signr = -1; static void skip_signal(int signo) { - if(child_pid == -1) + if ((child_pid == -1) || interval) done = 1; signr = signo; @@ -1115,6 +1226,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) "command to run prior to the measured command"), OPT_STRING(0, "post", &post_cmd, "command", "command to run after to the measured command"), + OPT_UINTEGER('I', "interval-print", &interval, + "print counts at regular interval in ms (>= 100)"), OPT_END() }; const char * const stat_usage[] = { @@ -1215,12 +1328,23 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) usage_with_options(stat_usage, options); return -1; } + if (interval && interval < 100) { + pr_err("print interval must be >= 100ms\n"); + usage_with_options(stat_usage, options); + return -1; + } list_for_each_entry(pos, &evsel_list->entries, node) { if (perf_evsel__alloc_stat_priv(pos) < 0 || perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0) goto out_free_fd; } + if (interval) { + list_for_each_entry(pos, &evsel_list->entries, node) { + if (perf_evsel__alloc_prev_raw_counts(pos) < 0) + goto out_free_fd; + } + } /* * We dont want to block the signals - that would cause @@ -1230,6 +1354,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) */ atexit(sig_atexit); signal(SIGINT, skip_signal); + signal(SIGCHLD, skip_signal); signal(SIGALRM, skip_signal); signal(SIGABRT, skip_signal); @@ -1242,11 +1367,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) status = run_perf_stat(argc, argv); } - if (status != -1) + if (status != -1 && !interval) print_stat(argc, argv); out_free_fd: - list_for_each_entry(pos, &evsel_list->entries, node) + list_for_each_entry(pos, &evsel_list->entries, node) { perf_evsel__free_stat_priv(pos); + perf_evsel__free_prev_raw_counts(pos); + } perf_evlist__delete_maps(evsel_list); out: perf_evlist__delete(evsel_list); -- cgit v1.2.3-59-g8ed1b From 
43f8e76e6b96eb1327cff62ac1cc733a51f31068 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 25 Jan 2013 10:44:44 +0900 Subject: perf evsel: Fix memory leaks on evsel->counts The ->counts field was never freed in the current code. Add a perf_evsel__free_counts() function to free it properly. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1359078284-32080-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 1 + tools/perf/tests/open-syscall-all-cpus.c | 1 + tools/perf/util/evsel.c | 5 +++++ tools/perf/util/evsel.h | 1 + 4 files changed, 8 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 493043abd164..0368a1036ad6 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1372,6 +1372,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) out_free_fd: list_for_each_entry(pos, &evsel_list->entries, node) { perf_evsel__free_stat_priv(pos); + perf_evsel__free_counts(pos); perf_evsel__free_prev_raw_counts(pos); } perf_evlist__delete_maps(evsel_list); diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c index 9b920a0cce79..b0657a9ccda6 100644 --- a/tools/perf/tests/open-syscall-all-cpus.c +++ b/tools/perf/tests/open-syscall-all-cpus.c @@ -98,6 +98,7 @@ int test__open_syscall_event_on_all_cpus(void) } } + perf_evsel__free_counts(evsel); out_close_fd: perf_evsel__close_fd(evsel, 1, threads->nr); out_evsel_delete: diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index dbdcca43cac6..baa26ddbcc7b 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -640,6 +640,11 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) } } +void perf_evsel__free_counts(struct perf_evsel *evsel) +{ + free(evsel->counts); +} + void perf_evsel__exit(struct perf_evsel *evsel) { assert(list_empty(&evsel->node)); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 3a4cd60044ea..cbf42322a27e 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -117,6 +117,7 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); void perf_evsel__free_fd(struct perf_evsel *evsel); void perf_evsel__free_id(struct perf_evsel *evsel); +void perf_evsel__free_counts(struct perf_evsel *evsel); void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, -- cgit v1.2.3-59-g8ed1b From e3541ec75219819d3235f80125a1a75d798ff6e1 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 23 Jan 2013 21:44:39 -0800 Subject: perf tools, powerpc: Fix compile warnings in tests/attr.c We print several '__u64' quantities using '%llu'. On powerpc, we by default include '<asm-generic/int-l64.h>' which results in __u64 being an unsigned long. This causes compile warnings which are treated as errors due to '-Werror'. By defining __SANE_USERSPACE_TYPES__ we include '<asm-generic/int-ll64.h>' and define __u64 as unsigned long long. Changelog[v2]: [Michael Ellerman] Use __SANE_USERSPACE_TYPES__ and avoid PRIu64 format specifier - which as Jiri Olsa pointed out, breaks on x86-64.
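For reference, the width mismatch is easy to reproduce outside perf. A minimal standalone sketch, not part of this patch (assumes a Linux uapi toolchain):

        #define __SANE_USERSPACE_TYPES__        /* must come before linux/types.h */
        #include <linux/types.h>
        #include <stdio.h>

        int main(void)
        {
                __u64 v = 1ULL << 40;

                /* with the define, __u64 is unsigned long long everywhere,
                 * so %llu matches on x86-64 and powerpc alike */
                printf("%llu\n", v);
                return 0;
        }

Without the define, 64-bit platforms that default to 'int-l64.h', powerpc among them, type __u64 as unsigned long and the printf above becomes a -Wformat error under -Werror.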
Signed-off-by: Sukadev Bhattiprolu Cc: Anton Blanchard Cc: Jiri Olsa Cc: Michael Ellerman Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130124054439.GA31588@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index f61dd3fb546b..bdcceb886f77 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -19,6 +19,11 @@ * permissions. All the event text files are stored there. */ +/* + * Powerpc needs __SANE_USERSPACE_TYPES__ before to select + * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu. + */ +#define __SANE_USERSPACE_TYPES__ #include #include #include -- cgit v1.2.3-59-g8ed1b From 68c465ada54c730d653fc6fdc9dc0d5270b2de00 Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Fri, 25 Jan 2013 10:57:08 +0100 Subject: perf tools: Fix possible double free on error Can only be triggered via CROSS_COMPILE env var. Detected by cppcheck. Signed-off-by: Thomas Jarosch Link: http://lkml.kernel.org/r/36736865.AIlztKhDqN@storm Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c index 3e975cb6232e..aacef07ebf31 100644 --- a/tools/perf/arch/common.c +++ b/tools/perf/arch/common.c @@ -155,6 +155,7 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env, if (lookup_path(buf)) goto out; free(buf); + buf = NULL; } if (!strcmp(arch, "arm")) -- cgit v1.2.3-59-g8ed1b From 8eb44dd76ac994b020e5cfe72635c90d9e0ad995 Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Fri, 25 Jan 2013 11:02:13 +0100 Subject: perf sort: Use pclose() instead of fclose() on pipe stream cppcheck message: [tools/perf/util/sort.c:277]: (error) Mismatching allocation and deallocation: fp Also fix descriptor leak on error and always initialize the "fp" variable. 
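The rule cppcheck enforces here: a stream returned by popen() must be released with pclose(), which also reaps the child; fclose() mismatches the allocator and leaks the child process. A minimal sketch of the correct pairing (hypothetical helper, not the perf code):

        #include <stdio.h>

        static int read_first_line(const char *cmd, char *buf, size_t len)
        {
                FILE *fp = popen(cmd, "r");     /* pipe stream, not a plain file */

                if (fp == NULL)
                        return -1;
                if (fgets(buf, len, fp) == NULL)
                        buf[0] = '\0';
                return pclose(fp);              /* never fclose() a popen() stream */
        }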
Signed-off-by: Thomas Jarosch Link: http://lkml.kernel.org/r/1359112354.yZcisNZ4k0@storm Link: http://lkml.kernel.org/r/2266358.qvDXKLvJ67@storm Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 7ad62393aa88..83336610faa9 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -249,7 +249,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width __maybe_unused) { - FILE *fp; + FILE *fp = NULL; char cmd[PATH_MAX + 2], *path = self->srcline, *nl; size_t line_len; @@ -270,7 +270,6 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, if (getline(&path, &line_len, fp) < 0 || !line_len) goto out_ip; - fclose(fp); self->srcline = strdup(path); if (self->srcline == NULL) goto out_ip; @@ -280,8 +279,12 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, *nl = '\0'; path = self->srcline; out_path: + if (fp) + pclose(fp); return repsep_snprintf(bf, size, "%s", path); out_ip: + if (fp) + pclose(fp); return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip); } -- cgit v1.2.3-59-g8ed1b From 0b9e01a4f0d1c8277da6824fe060ccb0434d2fde Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Fri, 25 Jan 2013 11:20:47 +0100 Subject: perf tools: Fix memory leak on error cppcheck reported: [util/event.c:480]: (error) Memory leak: event Signed-off-by: Thomas Jarosch Link: http://lkml.kernel.org/r/2717013.8dV0naNhAV@storm Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/event.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 3cf2c3e0605f..5cd13d768cec 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -476,8 +476,10 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, } } - if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) + if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) { + free(event); return -ENOENT; + } map = machine->vmlinux_maps[MAP__FUNCTION]; size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), -- cgit v1.2.3-59-g8ed1b From fdae6373910c10e27b8ae07e11e821b183d0bba5 Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Fri, 25 Jan 2013 11:21:39 +0100 Subject: perf header: Fix memory leak for the "Not caching a kptr_restrict'ed /proc/kallsyms" case cppcheck reported: [util/header.c:316]: (error) Memory leak: filename [util/header.c:316]: (error) Memory leak: linkname Signed-off-by: Thomas Jarosch Link: http://lkml.kernel.org/r/9377388.0eFDp53iW6@storm Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e17a8fe9c4d7..7b24cf3237d8 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -313,7 +313,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, if (is_kallsyms) { if (symbol_conf.kptr_restrict) { pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); - return 0; + err = 0; + goto out_free; } realname = (char *) name; } else -- cgit v1.2.3-59-g8ed1b From 5809fde040de2afa477a6c593ce2e8fd2c11d9d3 Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Mon, 28 Jan 2013 10:21:14 +0100 Subject: perf header: Fix double fclose() on do_write(fd, xxx) failure cppcheck reported: [util/header.c:983]: (error) Used file 
that is not opened. Thanks to Arnaldo Carvalho de Melo for pointing out that fclose(NULL) is undefined behavior -> protect against it. Signed-off-by: Thomas Jarosch Link: http://lkml.kernel.org/r/1751778.SZQB4fNdIh@storm Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 7b24cf3237d8..f6081cb3fca3 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -955,6 +955,7 @@ static int write_topo_node(int fd, int node) } fclose(fp); + fp = NULL; ret = do_write(fd, &mem_total, sizeof(u64)); if (ret) @@ -981,7 +982,8 @@ static int write_topo_node(int fd, int node) ret = do_write_string(fd, buf); done: free(buf); - fclose(fp); + if (fp) + fclose(fp); return ret; } -- cgit v1.2.3-59-g8ed1b From ad964704ba9326d027fc10fd0099b7c880e50172 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 29 Jan 2013 17:45:49 -0500 Subject: ring-buffer: Add stats field for amount read from trace ring buffer Add a stat about the number of events read from the ring buffer: # cat /debug/tracing/per_cpu/cpu0/stats entries: 39869 overrun: 870512 commit overrun: 0 bytes: 1449912 oldest event ts: 6561.368690 now ts: 6565.246426 dropped events: 0 read events: 112 <-- Added Signed-off-by: Steven Rostedt --- include/linux/ring_buffer.h | 1 + kernel/trace/ring_buffer.c | 18 ++++++++++++++++++ kernel/trace/trace.c | 3 +++ 3 files changed, 22 insertions(+) diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 519777e3fa01..1342e69542f3 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 13950d9027cb..7244acde77b0 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3102,6 +3102,24 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) } EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); +/** + * ring_buffer_read_events_cpu - get the number of events successfully read + * @buffer: The ring buffer + * @cpu: The per CPU buffer to get the number of events read + */ +unsigned long +ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) +{ + struct ring_buffer_per_cpu *cpu_buffer; + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return 0; + + cpu_buffer = buffer->buffers[cpu]; + return cpu_buffer->read; +} +EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); + /** * ring_buffer_entries - get the number of entries in a buffer * @buffer: The ring buffer diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d399592701aa..90a1c71fdbfc 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4430,6 +4430,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf, cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); + cnt = ring_buffer_read_events_cpu(tr->buffer, cpu); + trace_seq_printf(s,
"read events: %ld\n", cnt); + count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); kfree(s); -- cgit v1.2.3-59-g8ed1b From 5e67b51e3fb22ad43faf9589e9019ad9c6a00413 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 27 Dec 2012 11:49:45 +0900 Subject: tracing: Use sched_clock_cpu for trace_clock_global For systems with an unstable sched_clock, all cpu_clock() does is enable/disable local irqs around the call to sched_clock_cpu(). And for stable systems the two are the same. trace_clock_global() already disables interrupts, so it can call sched_clock_cpu() directly. Link: http://lkml.kernel.org/r/1356576585-28782-2-git-send-email-namhyung@kernel.org Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt --- kernel/trace/trace_clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 22b638b28e48..24bf48eabfcc 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void) local_irq_save(flags); this_cpu = raw_smp_processor_id(); - now = cpu_clock(this_cpu); + now = sched_clock_cpu(this_cpu); /* * If in an NMI context then dont risk lockups and return the * cpu_clock() time: -- cgit v1.2.3-59-g8ed1b From 2fd196ec1eab2623096e7fc7e6f3976160392bce Mon Sep 17 00:00:00 2001 From: Hiraku Toyooka Date: Wed, 26 Dec 2012 11:52:52 +0900 Subject: tracing: Replace static old_tracer check of tracer name Currently the trace buffer read functions use a static variable "old_tracer" for detecting if the current tracer changes. This was suitable for a single trace file ("trace"), but to add a snapshot feature that will use the same function for its file, a check against a static variable is not sufficient. To let two different files share the output functions, stop storing the current tracer in a static variable: the trace iterator descriptor already holds a pointer to the current tracer's name, so that pointer can be compared to detect a tracer change between reads of the trace file. Link: http://lkml.kernel.org/r/20121226025252.3252.9276.stgit@liselsia Signed-off-by: Hiraku Toyooka Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 90a1c71fdbfc..2c724662a3e8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1948,18 +1948,20 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; - static struct tracer *old_tracer; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; - /* copy the tracer to avoid using a global lock all around */ + /* + * copy the tracer to avoid using a global lock all around. + * iter->trace is a copy of current_trace, the pointer to the + * name may be used instead of a strcmp(), as iter->trace->name + * will point to the same string as current_trace->name.
+ */ mutex_lock(&trace_types_lock); - if (unlikely(old_tracer != current_trace && current_trace)) { - old_tracer = current_trace; + if (unlikely(current_trace && iter->trace->name != current_trace->name)) *iter->trace = *current_trace; - } mutex_unlock(&trace_types_lock); atomic_inc(&trace_record_cmdline_disabled); @@ -3494,7 +3496,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; - static struct tracer *old_tracer; ssize_t sret; /* return any leftover data */ @@ -3506,10 +3507,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); - if (unlikely(old_tracer != current_trace && current_trace)) { - old_tracer = current_trace; + if (unlikely(current_trace && iter->trace->name != current_trace->name)) *iter->trace = *current_trace; - } mutex_unlock(&trace_types_lock); /* @@ -3665,7 +3664,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; - static struct tracer *old_tracer; ssize_t ret; size_t rem; unsigned int i; @@ -3675,10 +3673,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); - if (unlikely(old_tracer != current_trace && current_trace)) { - old_tracer = current_trace; + if (unlikely(current_trace && iter->trace->name != current_trace->name)) *iter->trace = *current_trace; - } mutex_unlock(&trace_types_lock); mutex_lock(&iter->mutex); -- cgit v1.2.3-59-g8ed1b From debdd57f5145f3c6a4b3f8d0126abd1a2def7fc6 Mon Sep 17 00:00:00 2001 From: Hiraku Toyooka Date: Wed, 26 Dec 2012 11:53:00 +0900 Subject: tracing: Make a snapshot feature available from userspace Ftrace has a snapshot feature available from kernel space and latency tracers (e.g. irqsoff) are using it. This patch enables user applications to take a snapshot via debugfs. Add a "snapshot" debugfs file in the "tracing" directory. snapshot: This is used to take a snapshot and to read the output of the snapshot. # echo 1 > snapshot This will allocate the spare buffer for snapshot (if it is not allocated), and take a snapshot. # cat snapshot This will show contents of the snapshot. # echo 0 > snapshot This will free the snapshot if it is allocated. Any other positive values will clear the snapshot contents if the snapshot is allocated, or return EINVAL if it is not allocated. Link: http://lkml.kernel.org/r/20121226025300.3252.86850.stgit@liselsia Cc: Jiri Olsa Cc: David Sharp Signed-off-by: Hiraku Toyooka [ Fixed irqsoff selftest and also a conflict with a change that fixes the update_max_tr.
] Signed-off-by: Steven Rostedt --- include/linux/ftrace_event.h | 3 + kernel/trace/Kconfig | 10 +++ kernel/trace/trace.c | 166 ++++++++++++++++++++++++++++++++++++------- kernel/trace/trace.h | 1 + 4 files changed, 154 insertions(+), 26 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 6f8d0b77006b..13a54d0bdfa8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -83,6 +83,9 @@ struct trace_iterator { long idx; cpumask_var_t started; + + /* it's true when current open file is snapshot */ + bool snapshot; }; enum trace_iter_flags { diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index cdc9d284d24e..36567564e221 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -253,6 +253,16 @@ config FTRACE_SYSCALLS help Basic tracer to catch the syscall entry and exit events. +config TRACER_SNAPSHOT + bool "Create a snapshot trace buffer" + select TRACER_MAX_TRACE + help + Allow tracing users to take snapshot of the current buffer using the + ftrace interface, e.g.: + + echo 1 > /sys/kernel/debug/tracing/snapshot + cat snapshot + config TRACE_BRANCH_PROFILING bool select GENERIC_TRACER diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2c724662a3e8..70dce64b9ecf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) WARN_ON_ONCE(!irqs_disabled()); - /* If we disabled the tracer, stop now */ - if (current_trace == &nop_trace) - return; - - if (WARN_ON_ONCE(!current_trace->use_max_tr)) + if (!current_trace->allocated_snapshot) { + /* Only the nop tracer should hit this when disabling */ + WARN_ON_ONCE(current_trace != &nop_trace); return; + } arch_spin_lock(&ftrace_max_lock); @@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - if (!current_trace->use_max_tr) { - WARN_ON_ONCE(1); + if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) return; - } arch_spin_lock(&ftrace_max_lock); @@ -866,10 +863,13 @@ int register_tracer(struct tracer *type) current_trace = type; - /* If we expanded the buffers, make sure the max is expanded too */ - if (ring_buffer_expanded && type->use_max_tr) - ring_buffer_resize(max_tr.buffer, trace_buf_size, - RING_BUFFER_ALL_CPUS); + if (type->use_max_tr) { + /* If we expanded the buffers, make sure the max is expanded too */ + if (ring_buffer_expanded) + ring_buffer_resize(max_tr.buffer, trace_buf_size, + RING_BUFFER_ALL_CPUS); + type->allocated_snapshot = true; + } /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); @@ -885,10 +885,14 @@ int register_tracer(struct tracer *type) /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(tr); - /* Shrink the max buffer again */ - if (ring_buffer_expanded && type->use_max_tr) - ring_buffer_resize(max_tr.buffer, 1, - RING_BUFFER_ALL_CPUS); + if (type->use_max_tr) { + type->allocated_snapshot = false; + + /* Shrink the max buffer again */ + if (ring_buffer_expanded) + ring_buffer_resize(max_tr.buffer, 1, + RING_BUFFER_ALL_CPUS); + } printk(KERN_CONT "PASSED\n"); } @@ -1964,7 +1968,11 @@ static void *s_start(struct seq_file *m, loff_t *pos) *iter->trace = *current_trace; mutex_unlock(&trace_types_lock); - atomic_inc(&trace_record_cmdline_disabled); + if (iter->snapshot && iter->trace->use_max_tr) + return ERR_PTR(-EBUSY); + + if (!iter->snapshot) + 
atomic_inc(&trace_record_cmdline_disabled); if (*pos != iter->pos) { iter->ent = NULL; @@ -2003,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; - atomic_dec(&trace_record_cmdline_disabled); + if (iter->snapshot && iter->trace->use_max_tr) + return; + + if (!iter->snapshot) + atomic_dec(&trace_record_cmdline_disabled); trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } @@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = { }; static struct trace_iterator * -__tracing_open(struct inode *inode, struct file *file) +__tracing_open(struct inode *inode, struct file *file, bool snapshot) { long cpu_file = (long) inode->i_private; struct trace_iterator *iter; @@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file) if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; - if (current_trace && current_trace->print_max) + if ((current_trace && current_trace->print_max) || snapshot) iter->tr = &max_tr; else iter->tr = &global_trace; + iter->snapshot = snapshot; iter->pos = -1; mutex_init(&iter->mutex); iter->cpu_file = cpu_file; @@ -2491,8 +2504,9 @@ __tracing_open(struct inode *inode, struct file *file) if (trace_clocks[trace_clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; - /* stop the trace while dumping */ - tracing_stop(); + /* stop the trace while dumping if we are not opening "snapshot" */ + if (!iter->snapshot) + tracing_stop(); if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { for_each_tracing_cpu(cpu) { @@ -2555,8 +2569,9 @@ static int tracing_release(struct inode *inode, struct file *file) if (iter->trace && iter->trace->close) iter->trace->close(iter); - /* reenable tracing if it was previously enabled */ - tracing_start(); + if (!iter->snapshot) + /* reenable tracing if it was previously enabled */ + tracing_start(); mutex_unlock(&trace_types_lock); mutex_destroy(&iter->mutex); @@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file) } if (file->f_mode & FMODE_READ) { - iter = __tracing_open(inode, file); + iter = __tracing_open(inode, file, false); if (IS_ERR(iter)) ret = PTR_ERR(iter); else if (trace_flags & TRACE_ITER_LATENCY_FMT) @@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf) if (current_trace && current_trace->reset) current_trace->reset(tr); - had_max_tr = current_trace && current_trace->use_max_tr; + had_max_tr = current_trace && current_trace->allocated_snapshot; current_trace = &nop_trace; if (had_max_tr && !t->use_max_tr) { @@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf) */ ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); set_buffer_entries(&max_tr, 1); + tracing_reset_online_cpus(&max_tr); + current_trace->allocated_snapshot = false; } destroy_trace_option_files(topts); @@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf) RING_BUFFER_ALL_CPUS); if (ret < 0) goto out; + t->allocated_snapshot = true; } if (t->init) { @@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file) return single_open(file, tracing_clock_show, NULL); } +#ifdef CONFIG_TRACER_SNAPSHOT +static int tracing_snapshot_open(struct inode *inode, struct file *file) +{ + struct trace_iterator *iter; + int ret = 0; + + if (file->f_mode & FMODE_READ) { + iter = __tracing_open(inode, file, true); + if (IS_ERR(iter)) + ret = PTR_ERR(iter); + } + return ret; +} + +static ssize_t +tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, + 
loff_t *ppos) +{ + unsigned long val; + int ret; + + ret = tracing_update_buffers(); + if (ret < 0) + return ret; + + ret = kstrtoul_from_user(ubuf, cnt, 10, &val); + if (ret) + return ret; + + mutex_lock(&trace_types_lock); + + if (current_trace && current_trace->use_max_tr) { + ret = -EBUSY; + goto out; + } + + switch (val) { + case 0: + if (current_trace->allocated_snapshot) { + /* free spare buffer */ + ring_buffer_resize(max_tr.buffer, 1, + RING_BUFFER_ALL_CPUS); + set_buffer_entries(&max_tr, 1); + tracing_reset_online_cpus(&max_tr); + current_trace->allocated_snapshot = false; + } + break; + case 1: + if (!current_trace->allocated_snapshot) { + /* allocate spare buffer */ + ret = resize_buffer_duplicate_size(&max_tr, + &global_trace, RING_BUFFER_ALL_CPUS); + if (ret < 0) + break; + current_trace->allocated_snapshot = true; + } + + local_irq_disable(); + /* Now, we're going to swap */ + update_max_tr(&global_trace, current, smp_processor_id()); + local_irq_enable(); + break; + default: + if (current_trace->allocated_snapshot) + tracing_reset_online_cpus(&max_tr); + else + ret = -EINVAL; + break; + } + + if (ret >= 0) { + *ppos += cnt; + ret = cnt; + } +out: + mutex_unlock(&trace_types_lock); + return ret; +} +#endif /* CONFIG_TRACER_SNAPSHOT */ + + static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, @@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = { .write = tracing_clock_write, }; +#ifdef CONFIG_TRACER_SNAPSHOT +static const struct file_operations snapshot_fops = { + .open = tracing_snapshot_open, + .read = seq_read, + .write = tracing_snapshot_write, + .llseek = tracing_seek, + .release = tracing_release, +}; +#endif /* CONFIG_TRACER_SNAPSHOT */ + struct ftrace_buffer_info { struct trace_array *tr; void *spare; @@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void) &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif +#ifdef CONFIG_TRACER_SNAPSHOT + trace_create_file("snapshot", 0644, d_tracer, + (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); +#endif + create_trace_options_dir(); for_each_tracing_cpu(cpu) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 04a2c7ab1735..57d7e5397d56 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -287,6 +287,7 @@ struct tracer { struct tracer_flags *flags; bool print_max; bool use_max_tr; + bool allocated_snapshot; }; -- cgit v1.2.3-59-g8ed1b From c1043fcda1b9e8e5144cfdaee7be262c50dbdead Mon Sep 17 00:00:00 2001 From: Hiraku Toyooka Date: Wed, 26 Dec 2012 11:53:09 +0900 Subject: tracing: Add documentation of snapshot utility This patch adds snapshot description in ftrace documentation. This description includes what the snapshot is and how to use it. Link: http://lkml.kernel.org/r/20121226025309.3252.150.stgit@liselsia Cc: Rob Landley Signed-off-by: Hiraku Toyooka Signed-off-by: Steven Rostedt --- Documentation/trace/ftrace.txt | 83 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 6f51fed45f2d..53d6a3c51d87 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -1842,6 +1842,89 @@ an error. # cat buffer_size_kb 85 +Snapshot +-------- +CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature +available to all non latency tracers. 
(Latency tracers which +record max latency, such as "irqsoff" or "wakeup", can't use +this feature, since those are already using the snapshot +mechanism internally.) + +Snapshot preserves a current trace buffer at a particular point +in time without stopping tracing. Ftrace swaps the current +buffer with a spare buffer, and tracing continues in the new +current (=previous spare) buffer. + +The following debugfs files in "tracing" are related to this +feature: + + snapshot: + + This is used to take a snapshot and to read the output + of the snapshot. Echo 1 into this file to allocate a + spare buffer and to take a snapshot (swap), then read + the snapshot from this file in the same format as + "trace" (described above in the section "The File + System"). Both reads snapshot and tracing are executable + in parallel. When the spare buffer is allocated, echoing + 0 frees it, and echoing else (positive) values clear the + snapshot contents. + More details are shown in the table below. + + status\input | 0 | 1 | else | + --------------+------------+------------+------------+ + not allocated |(do nothing)| alloc+swap | EINVAL | + --------------+------------+------------+------------+ + allocated | free | swap | clear | + --------------+------------+------------+------------+ + +Here is an example of using the snapshot feature. + + # echo 1 > events/sched/enable + # echo 1 > snapshot + # cat snapshot +# tracer: nop +# +# entries-in-buffer/entries-written: 71/71 #P:8 +# +# _-----=> irqs-off +# / _----=> need-resched +# | / _---=> hardirq/softirq +# || / _--=> preempt-depth +# ||| / delay +# TASK-PID CPU# |||| TIMESTAMP FUNCTION +# | | | |||| | | + -0 [005] d... 2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120 + sleep-2242 [005] d... 2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120 +[...] + -0 [002] d... 2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120 + + # cat trace +# tracer: nop +# +# entries-in-buffer/entries-written: 77/77 #P:8 +# +# _-----=> irqs-off +# / _----=> need-resched +# | / _---=> hardirq/softirq +# || / _--=> preempt-depth +# ||| / delay +# TASK-PID CPU# |||| TIMESTAMP FUNCTION +# | | | |||| | | + -0 [007] d... 2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120 + snapshot-test-2-2229 [002] d... 2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120 +[...] + + +If you try to use this snapshot feature when current tracer is +one of the latency tracers, you will get the following results. + + # echo wakeup > current_tracer + # echo 1 > snapshot +bash: echo: write error: Device or resource busy + # cat snapshot +cat: snapshot: Device or resource busy + ----------- More details can be found in the source code, in the -- cgit v1.2.3-59-g8ed1b From 11859e821761e9738c4d8a0e7d6ca1cc2e0d37e8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 30 Jan 2013 13:25:53 -0300 Subject: perf top: Stop using exit() Just return to the perf main() routine so that a unified exit path can be followed and resources released, helping in finding memory leaks.
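The pattern being applied: report failure through return values and funnel every path through one cleanup point, so tools like valgrind can see each resource released. A schematic, self-contained sketch with hypothetical names, not the perf code itself:

        #include <stdio.h>
        #include <stdlib.h>

        struct ctx { char *buf; };

        static int do_work(struct ctx *c)
        {
                int ret = -1;

                c->buf = malloc(64);
                if (c->buf == NULL)
                        goto out;               /* no exit(): fall through to cleanup */
                snprintf(c->buf, 64, "ok");
                ret = 0;
        out:
                free(c->buf);                   /* single exit path frees everything */
                c->buf = NULL;
                return ret;
        }

        int main(void)
        {
                struct ctx c = { NULL };
                return do_work(&c) ? EXIT_FAILURE : EXIT_SUCCESS;
        }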
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-ro8oeodo96490nrhcph57atr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 56 ++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7978c8117b7f..903e4f4a3047 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -68,6 +68,8 @@ #include #include +static volatile int done; + static void perf_top__update_print_entries(struct perf_top *top) { if (top->print_entries > 9) @@ -431,8 +433,10 @@ static int perf_top__key_mapped(struct perf_top *top, int c) return 0; } -static void perf_top__handle_keypress(struct perf_top *top, int c) +static bool perf_top__handle_keypress(struct perf_top *top, int c) { + bool ret = true; + if (!perf_top__key_mapped(top, c)) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; struct termios tc, save; @@ -453,7 +457,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) tcsetattr(0, TCSAFLUSH, &save); if (!perf_top__key_mapped(top, c)) - return; + return ret; } switch (c) { @@ -515,7 +519,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) printf("exiting.\n"); if (top->dump_symtab) perf_session__fprintf_dsos(top->session, stderr); - exit(0); + ret = false; + break; case 's': perf_top__prompt_symbol(top, "Enter details symbol"); break; @@ -538,6 +543,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) default: break; } + + return ret; } static void perf_top__sort_new_samples(void *arg) @@ -579,8 +586,7 @@ static void *display_thread_tui(void *arg) perf_evlist__tui_browse_hists(top->evlist, help, &hbt, &top->session->header.env); - exit_browser(0); - exit(0); + done = 1; return NULL; } @@ -604,7 +610,7 @@ repeat: /* trash return*/ getc(stdin); - while (1) { + while (!done) { perf_top__print_sym_table(top); /* * Either timeout expired or we got an EINTR due to SIGWINCH, @@ -618,15 +624,14 @@ repeat: continue; /* Fall trhu */ default: - goto process_hotkey; + c = getc(stdin); + tcsetattr(0, TCSAFLUSH, &save); + + if (perf_top__handle_keypress(top, c)) + goto repeat; + done = 1; } } -process_hotkey: - c = getc(stdin); - tcsetattr(0, TCSAFLUSH, &save); - - perf_top__handle_keypress(top, c); - goto repeat; return NULL; } @@ -705,7 +710,7 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (!machine) { - pr_err("%u unprocessable samples recorded.\n", + pr_err("%u unprocessable samples recorded.\r", top->session->stats.nr_unprocessable_samples++); return; } @@ -868,7 +873,7 @@ static void perf_top__mmap_read(struct perf_top *top) perf_top__mmap_read_idx(top, i); } -static void perf_top__start_counters(struct perf_top *top) +static int perf_top__start_counters(struct perf_top *top) { char msg[512]; struct perf_evsel *counter; @@ -900,11 +905,10 @@ try_again: goto out_err; } - return; + return 0; out_err: - exit_browser(0); - exit(0); + return -1; } static int perf_top__setup_sample_type(struct perf_top *top) @@ -948,7 +952,11 @@ static int __cmd_top(struct perf_top *top) else perf_event__synthesize_threads(&top->tool, perf_event__process, &top->session->machines.host); - perf_top__start_counters(top); + + ret = perf_top__start_counters(top); + if (ret) + goto out_delete; + top->session->evlist = top->evlist; 
perf_session__set_id_hdr_size(top->session); @@ -968,10 +976,11 @@ static int __cmd_top(struct perf_top *top) perf_top__mmap_read(top); + ret = -1; if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : display_thread), top)) { ui__error("Could not create display thread.\n"); - exit(-1); + goto out_delete; } if (top->realtime_prio) { struct sched_param param; param.sched_priority = top->realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, &param)) { ui__error("Could not set realtime priority.\n"); - exit(-1); + goto out_delete; } } - while (1) { + while (!done) { u64 hits = top->samples; perf_top__mmap_read(top); if (hits == top->samples) ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100); } + ret = 0; out_delete: perf_session__delete(top->session); top->session = NULL; - return 0; + return ret; } static int -- cgit v1.2.3-59-g8ed1b From 0de233b9c4f8c83b2cb655bfdbec306c8da81199 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 30 Jan 2013 14:01:20 -0300 Subject: perf top: Delete maps on exit Removing one more memory leak found with valgrind. Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-gnb1gms0k8wictmtm2umpr8u@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 903e4f4a3047..f561757b1bfa 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1164,7 +1164,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) if (!top.evlist->nr_entries && perf_evlist__add_default(top.evlist) < 0) { ui__error("Not enough memory for event selector list\n"); - return -ENOMEM; + goto out_delete_maps; } symbol_conf.nr_events = top.evlist->nr_entries; @@ -1187,7 +1187,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) } else { ui__error("frequency and count are zero, aborting\n"); status = -EINVAL; - goto out_delete_evlist; + goto out_delete_maps; } top.sym_evsel = perf_evlist__first(top.evlist); @@ -1220,6 +1220,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) status = __cmd_top(&top); +out_delete_maps: + perf_evlist__delete_maps(top.evlist); out_delete_evlist: perf_evlist__delete(top.evlist); -- cgit v1.2.3-59-g8ed1b From 97f63e4a2cf88e9d7bc086a1c3f10fa41c9174df Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:29 +0900 Subject: perf tools: Keep group information Add a few group-related fields to struct perf_{evlist,evsel} so that the group information in an evlist can be known easily. It only counts groups which have more than one member, since leader-only groups are treated as non-group events.
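The bookkeeping relies on the members of a group occupying contiguous idx values with the leader first; both new fields then reduce to index arithmetic. A toy illustration with hypothetical values (minimal struct, not perf's):

        #include <stdio.h>

        struct evsel { int idx; struct evsel *leader; int nr_members; };

        int main(void)
        {
                struct evsel leader = { .idx = 4 }, last = { .idx = 6 };

                leader.leader = &leader;
                last.leader = &leader;
                /* leader at idx 4, last member at idx 6 => 3 members */
                leader.nr_members = last.idx - leader.idx + 1;
                /* a member's slot inside its group; the leader gets 0 */
                printf("nr_members=%d group_idx=%d\n",
                       leader.nr_members, last.idx - last.leader->idx);
                return 0;
        }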
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 7 ++++++- tools/perf/util/evlist.h | 1 + tools/perf/util/evsel.h | 6 ++++++ tools/perf/util/parse-events.c | 1 + tools/perf/util/parse-events.h | 1 + tools/perf/util/parse-events.y | 10 ++++++++++ 6 files changed, 25 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index dc8aee97a488..eddd5ebcd690 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -117,6 +117,9 @@ void __perf_evlist__set_leader(struct list_head *list) struct perf_evsel *evsel, *leader; leader = list_entry(list->next, struct perf_evsel, node); + evsel = list_entry(list->prev, struct perf_evsel, node); + + leader->nr_members = evsel->idx - leader->idx + 1; list_for_each_entry(evsel, list, node) { if (evsel != leader) @@ -126,8 +129,10 @@ void __perf_evlist__set_leader(struct list_head *list) void perf_evlist__set_leader(struct perf_evlist *evlist) { - if (evlist->nr_entries) + if (evlist->nr_entries) { + evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; __perf_evlist__set_leader(&evlist->entries); + } } int perf_evlist__add_default(struct perf_evlist *evlist) diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 457e2350d21d..73579a25a93e 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -21,6 +21,7 @@ struct perf_evlist { struct list_head entries; struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; int nr_entries; + int nr_groups; int nr_fds; int nr_mmaps; int mmap_len; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index cbf42322a27e..c9031ebf196e 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -74,6 +74,7 @@ struct perf_evsel { bool needs_swap; /* parse modifier helper */ int exclude_GH; + int nr_members; struct perf_evsel *leader; char *group_name; }; @@ -259,4 +260,9 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err, int perf_evsel__open_strerror(struct perf_evsel *evsel, struct perf_target *target, int err, char *msg, size_t size); + +static inline int perf_evsel__group_idx(struct perf_evsel *evsel) +{ + return evsel->idx - evsel->leader->idx; +} #endif /* __PERF_EVSEL_H */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 02f6421f03a0..4e0f5c2a9fda 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -884,6 +884,7 @@ int parse_events(struct perf_evlist *evlist, const char *str) if (!ret) { int entries = data.idx - evlist->nr_entries; perf_evlist__splice_list_tail(evlist, &data.list, entries); + evlist->nr_groups += data.nr_groups; return 0; } diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 2cd2c42a69c5..8a4859315fd9 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -64,6 +64,7 @@ struct parse_events_term { struct parse_events_evlist { struct list_head list; int idx; + int nr_groups; }; struct parse_events_terms { diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 9d43c86176ff..4de2fdca98c8 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -23,6 +23,14 @@ do { \ YYABORT; \ } while (0) +static inc_group_count(struct list_head *list, + struct parse_events_evlist *data) +{ + /* Count 
groups only have more than 1 members */ + if (!list_is_last(list->next, list)) + data->nr_groups++; +} + %} %token PE_START_EVENTS PE_START_TERMS @@ -123,6 +131,7 @@ PE_NAME '{' events '}' { struct list_head *list = $3; + inc_group_count(list, _data); parse_events__set_leader($1, list); $$ = list; } @@ -131,6 +140,7 @@ PE_NAME '{' events '}' { struct list_head *list = $2; + inc_group_count(list, _data); parse_events__set_leader(NULL, list); $$ = list; } -- cgit v1.2.3-59-g8ed1b From 8d7d8474d7b04dc89aa653d67425b61d3ff5c6f0 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:30 +0900 Subject: perf tests: Add group test conditions As some new fields for handling groups added, check them to be sure to have valid values in test__group* cases. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 20acaff295d2..80a8daf54a63 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -23,6 +23,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist) struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); @@ -35,6 +36,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) struct perf_evsel *evsel; TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); + TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); list_for_each_entry(evsel, &evlist->entries, node) { TEST_ASSERT_VAL("wrong type", @@ -510,6 +512,7 @@ static int test__group1(struct perf_evlist *evlist) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); /* instructions:k */ evsel = leader = perf_evlist__first(evlist); @@ -523,6 +526,8 @@ static int test__group1(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); /* cycles:upp */ evsel = perf_evsel__next(evsel); @@ -537,6 +542,7 @@ static int test__group1(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); return 0; } @@ -546,6 +552,7 @@ static int test__group2(struct perf_evlist *evlist) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); /* faults + :ku modifier */ evsel = leader = perf_evlist__first(evlist); @@ -559,6 +566,8 @@ 
static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); /* cache-references + :u modifier */ evsel = perf_evsel__next(evsel); @@ -572,6 +581,7 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); /* cycles:k */ evsel = perf_evsel__next(evsel); @@ -594,6 +604,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); /* group1 syscalls:sys_enter_open:H */ evsel = leader = perf_evlist__first(evlist); @@ -610,6 +621,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); TEST_ASSERT_VAL("wrong group name", !strcmp(leader->group_name, "group1")); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); /* group1 cycles:kppp */ evsel = perf_evsel__next(evsel); @@ -625,6 +638,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); /* group2 cycles + G modifier */ evsel = leader = perf_evsel__next(evsel); @@ -640,6 +654,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); TEST_ASSERT_VAL("wrong group name", !strcmp(leader->group_name, "group2")); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); /* group2 1:3 + G modifier */ evsel = perf_evsel__next(evsel); @@ -652,6 +668,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); /* instructions:u */ evsel = perf_evsel__next(evsel); @@ -674,6 +691,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); /* cycles:u + p */ evsel = leader = perf_evlist__first(evlist); @@ -689,6 +707,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", 
perf_evsel__group_idx(evsel) == 0); /* instructions:kp + p */ evsel = perf_evsel__next(evsel); @@ -703,6 +723,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); return 0; } @@ -712,6 +733,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); /* cycles + G */ evsel = leader = perf_evlist__first(evlist); @@ -726,6 +748,8 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); /* instructions + G */ evsel = perf_evsel__next(evsel); @@ -739,6 +763,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); /* cycles:G */ evsel = leader = perf_evsel__next(evsel); @@ -753,6 +778,8 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); /* instructions:G */ evsel = perf_evsel__next(evsel); @@ -766,6 +793,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); /* cycles */ evsel = perf_evsel__next(evsel); -- cgit v1.2.3-59-g8ed1b From a8bb559bd4eff5c71601e2e61a4bd1deef44a03c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:31 +0900 Subject: perf header: Add HEADER_GROUP_DESC feature Save group relationship information so that it can be restored when perf report is running. 
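The record layout is simple enough to sketch a writer for. This is a simplified stand-in using plain fwrite() and a flat length-prefixed string; perf's real code goes through its do_write()/do_write_string() helpers instead:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        struct group_desc { const char *name; uint32_t leader_idx, nr_members; };

        static int write_group_descs(FILE *fp, const struct group_desc *g,
                                     uint32_t nr_groups)
        {
                uint32_t i, len;

                if (fwrite(&nr_groups, sizeof(nr_groups), 1, fp) != 1)
                        return -1;
                for (i = 0; i < nr_groups; i++) {
                        len = (uint32_t)strlen(g[i].name) + 1;  /* include NUL */
                        if (fwrite(&len, sizeof(len), 1, fp) != 1 ||
                            fwrite(g[i].name, len, 1, fp) != 1 ||
                            fwrite(&g[i].leader_idx, sizeof(uint32_t), 1, fp) != 1 ||
                            fwrite(&g[i].nr_members, sizeof(uint32_t), 1, fp) != 1)
                                return -1;
                }
                return 0;
        }

On the read side, the patch below walks the evlist once: each leader_idx match gets the saved name and member count, and the following nr_members - 1 evsels are attached to that leader.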
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-4-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 3 + tools/perf/util/header.c | 164 ++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/header.h | 2 + 3 files changed, 169 insertions(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 2ac690cad411..774c90713a53 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -486,6 +486,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) goto out_delete_session; } + if (!evsel_list->nr_groups) + perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); + /* * perf_session__delete(session) will be called at perf_record__exit() */ diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index f6081cb3fca3..8022b5254f75 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1084,6 +1084,52 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, return 0; } +/* + * File format: + * + * struct group_descs { + * u32 nr_groups; + * struct group_desc { + * char name[]; + * u32 leader_idx; + * u32 nr_members; + * }[nr_groups]; + * }; + */ +static int write_group_desc(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist) +{ + u32 nr_groups = evlist->nr_groups; + struct perf_evsel *evsel; + int ret; + + ret = do_write(fd, &nr_groups, sizeof(nr_groups)); + if (ret < 0) + return ret; + + list_for_each_entry(evsel, &evlist->entries, node) { + if (perf_evsel__is_group_leader(evsel) && + evsel->nr_members > 1) { + const char *name = evsel->group_name ?: "{anon_group}"; + u32 leader_idx = evsel->idx; + u32 nr_members = evsel->nr_members; + + ret = do_write_string(fd, name); + if (ret < 0) + return ret; + + ret = do_write(fd, &leader_idx, sizeof(leader_idx)); + if (ret < 0) + return ret; + + ret = do_write(fd, &nr_members, sizeof(nr_members)); + if (ret < 0) + return ret; + } + } + return 0; +} + /* * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(ARCH)/util/header.c @@ -1447,6 +1493,31 @@ error: fprintf(fp, "# pmu mappings: unable to read\n"); } +static void print_group_desc(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + struct perf_session *session; + struct perf_evsel *evsel; + u32 nr = 0; + + session = container_of(ph, struct perf_session, header); + + list_for_each_entry(evsel, &session->evlist->entries, node) { + if (perf_evsel__is_group_leader(evsel) && + evsel->nr_members > 1) { + fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", + perf_evsel__name(evsel)); + + nr = evsel->nr_members - 1; + } else if (nr) { + fprintf(fp, ",%s", perf_evsel__name(evsel)); + + if (--nr == 0) + fprintf(fp, "}\n"); + } + } +} + static int __event_process_build_id(struct build_id_event *bev, char *filename, struct perf_session *session) @@ -1961,6 +2032,98 @@ error: return -1; } +static int process_group_desc(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret = -1; + u32 i, nr, nr_groups; + struct perf_session *session; + struct perf_evsel *evsel, *leader = NULL; + struct group_desc { + char *name; + u32 leader_idx; + u32 nr_members; + } *desc; + + if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups)) + return -1; + + if 
(ph->needs_swap) + nr_groups = bswap_32(nr_groups); + + ph->env.nr_groups = nr_groups; + if (!nr_groups) { + pr_debug("group desc not available\n"); + return 0; + } + + desc = calloc(nr_groups, sizeof(*desc)); + if (!desc) + return -1; + + for (i = 0; i < nr_groups; i++) { + desc[i].name = do_read_string(fd, ph); + if (!desc[i].name) + goto out_free; + + if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32)) + goto out_free; + + if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32)) + goto out_free; + + if (ph->needs_swap) { + desc[i].leader_idx = bswap_32(desc[i].leader_idx); + desc[i].nr_members = bswap_32(desc[i].nr_members); + } + } + + /* + * Rebuild group relationship based on the group_desc + */ + session = container_of(ph, struct perf_session, header); + session->evlist->nr_groups = nr_groups; + + i = nr = 0; + list_for_each_entry(evsel, &session->evlist->entries, node) { + if (evsel->idx == (int) desc[i].leader_idx) { + evsel->leader = evsel; + /* {anon_group} is a dummy name */ + if (strcmp(desc[i].name, "{anon_group}")) + evsel->group_name = desc[i].name; + evsel->nr_members = desc[i].nr_members; + + if (i >= nr_groups || nr > 0) { + pr_debug("invalid group desc\n"); + goto out_free; + } + + leader = evsel; + nr = evsel->nr_members - 1; + i++; + } else if (nr) { + /* This is a group member */ + evsel->leader = leader; + + nr--; + } + } + + if (i != nr_groups || nr != 0) { + pr_debug("invalid group desc\n"); + goto out_free; + } + + ret = 0; +out_free: + while ((int) --i >= 0) + free(desc[i].name); + free(desc); + + return ret; +} + struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); @@ -2000,6 +2163,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), + FEAT_OPP(HEADER_GROUP_DESC, group_desc), }; struct header_print_data { diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 20f0344accb1..c9fc55cada6d 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -29,6 +29,7 @@ enum { HEADER_NUMA_TOPOLOGY, HEADER_BRANCH_STACK, HEADER_PMU_MAPPINGS, + HEADER_GROUP_DESC, HEADER_LAST_FEATURE, HEADER_FEAT_BITS = 256, }; @@ -79,6 +80,7 @@ struct perf_session_env { char *numa_nodes; int nr_pmu_mappings; char *pmu_mappings; + int nr_groups; }; struct perf_header { -- cgit v1.2.3-59-g8ed1b From 6e1f601a10cbaa5cda869f844292dd81c519a8e7 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:32 +0900 Subject: perf report: Make another loop for linking group hists Now the event group view requires linking all member hists in a group to the leader's. Thus hists__output_resort should be called only after linking all events in the evlist. Introduce a symbol_conf.event_group flag to determine whether the feature is enabled or not.
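In outline, the report flow becomes two passes over the evlist; the sketch below reuses the names from the patch and elides error handling:

        /* pass 1: collapse each evsel's hists, then attach group members */
        list_for_each_entry(pos, &session->evlist->entries, node) {
                hists__collapse_resort(&pos->hists);
                if (symbol_conf.event_group && !perf_evsel__is_group_leader(pos)) {
                        hists__match(&pos->leader->hists, &pos->hists);
                        hists__link(&pos->leader->hists, &pos->hists);
                }
        }

        /* pass 2: resort for output only after every pair is linked */
        list_for_each_entry(pos, &session->evlist->entries, node)
                hists__output_resort(&pos->hists);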
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-5-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 13 ++++++++++++- tools/perf/util/symbol.h | 3 ++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 47a864478543..fd2503a5f876 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -416,8 +416,16 @@ static int __cmd_report(struct perf_report *rep) hists->symbol_filter_str = rep->symbol_filter_str; hists__collapse_resort(hists); - hists__output_resort(hists); nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE]; + + /* Non-group events are considered as leader */ + if (symbol_conf.event_group && + !perf_evsel__is_group_leader(pos)) { + struct hists *leader_hists = &pos->leader->hists; + + hists__match(leader_hists, hists); + hists__link(leader_hists, hists); + } } if (nr_samples == 0) { @@ -425,6 +433,9 @@ static int __cmd_report(struct perf_report *rep) goto out_delete; } + list_for_each_entry(pos, &session->evlist->entries, node) + hists__output_resort(&pos->hists); + if (use_browser > 0) { if (use_browser == 1) { perf_evlist__tui_browse_hists(session->evlist, help, diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index d97377ac2f16..b62ca37c4b77 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -96,7 +96,8 @@ struct symbol_conf { initialized, kptr_restrict, annotate_asm_raw, - annotate_src; + annotate_src, + event_group; const char *vmlinux_name, *kallsyms_name, *source_prefix, -- cgit v1.2.3-59-g8ed1b From 29d720ed5f897d7e26f6b36c12c7704dc200d107 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:33 +0900 Subject: perf hists: Resort hist entries using group members for output When event group is enabled, sorting hist entries on periods for output should consider groups members' period also. To do that, build period table using link/pair information and compare the table. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-6-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.h | 2 ++ tools/perf/util/hist.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index c9031ebf196e..371713977888 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -79,6 +79,8 @@ struct perf_evsel { char *group_name; }; +#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists) + struct cpu_map; struct thread_map; struct perf_evlist; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 8170a3d11ffa..f855941bebea 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -4,6 +4,7 @@ #include "hist.h" #include "session.h" #include "sort.h" +#include "evsel.h" #include static bool hists__filter_entry_by_dso(struct hists *hists, @@ -540,6 +541,62 @@ void hists__collapse_resort_threaded(struct hists *hists) * reverse the map, sort on period. 
*/ +static int period_cmp(u64 period_a, u64 period_b) +{ + if (period_a > period_b) + return 1; + if (period_a < period_b) + return -1; + return 0; +} + +static int hist_entry__sort_on_period(struct hist_entry *a, + struct hist_entry *b) +{ + int ret; + int i, nr_members; + struct perf_evsel *evsel; + struct hist_entry *pair; + u64 *periods_a, *periods_b; + + ret = period_cmp(a->stat.period, b->stat.period); + if (ret || !symbol_conf.event_group) + return ret; + + evsel = hists_to_evsel(a->hists); + nr_members = evsel->nr_members; + if (nr_members <= 1) + return ret; + + periods_a = zalloc(sizeof(periods_a) * nr_members); + periods_b = zalloc(sizeof(periods_b) * nr_members); + + if (!periods_a || !periods_b) + goto out; + + list_for_each_entry(pair, &a->pairs.head, pairs.node) { + evsel = hists_to_evsel(pair->hists); + periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period; + } + + list_for_each_entry(pair, &b->pairs.head, pairs.node) { + evsel = hists_to_evsel(pair->hists); + periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period; + } + + for (i = 1; i < nr_members; i++) { + ret = period_cmp(periods_a[i], periods_b[i]); + if (ret) + break; + } + +out: + free(periods_a); + free(periods_b); + + return ret; +} + static void __hists__insert_output_entry(struct rb_root *entries, struct hist_entry *he, u64 min_callchain_hits) @@ -556,7 +613,7 @@ static void __hists__insert_output_entry(struct rb_root *entries, parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); - if (he->stat.period > iter->stat.period) + if (hist_entry__sort_on_period(he, iter) > 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; -- cgit v1.2.3-59-g8ed1b From 4fb71074a570aab9ba8a30b7a756a3c637a14c03 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:34 +0900 Subject: perf ui/hist: Consolidate hpp helpers Most of hpp helper functions do same jobs for different fields thus consolidate them to appropriate functions/macros. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-7-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/hist.c | 238 ++++++++++++++++++--------------------------------- 1 file changed, 84 insertions(+), 154 deletions(-) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 1889c12ca81f..849c40028e7b 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -3,151 +3,107 @@ #include "../util/hist.h" #include "../util/util.h" #include "../util/sort.h" - +#include "../util/evsel.h" /* hist period print (hpp) functions */ -static int hpp__header_overhead(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "Overhead"); -} - -static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused) -{ - return 8; -} - -static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period / hists->stats.total_period; - - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); -} - -static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%"; - - return scnprintf(hpp->buf, hpp->size, fmt, percent); -} - -static int hpp__header_overhead_sys(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? 
"%s" : "%7s"; - - return scnprintf(hpp->buf, hpp->size, fmt, "sys"); -} - -static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) -{ - return 7; -} - -static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_sys / hists->stats.total_period; - return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); -} +typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...); -static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) +static int __hpp__percent_fmt(struct perf_hpp *hpp, struct hist_entry *he, + u64 (*get_field)(struct hist_entry *), + const char *fmt, hpp_snprint_fn print_fn) { + int ret; + double percent = 0.0; struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_sys / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%"; - - return scnprintf(hpp->buf, hpp->size, fmt, percent); -} - -static int hpp__header_overhead_us(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; - return scnprintf(hpp->buf, hpp->size, fmt, "user"); -} + if (hists->stats.total_period) + percent = 100.0 * get_field(he) / hists->stats.total_period; -static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) -{ - return 7; + ret = print_fn(hpp->buf, hpp->size, fmt, percent); + return ret; } -static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) +static int __hpp__raw_fmt(struct perf_hpp *hpp, struct hist_entry *he, + u64 (*get_field)(struct hist_entry *), + const char *fmt, hpp_snprint_fn print_fn) { - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_us / hists->stats.total_period; + int ret; - return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); + ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he)); + return ret; } -static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_us / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? 
"%.2f" : "%6.2f%%"; - return scnprintf(hpp->buf, hpp->size, fmt, percent); +#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +static int hpp__header_##_type(struct perf_hpp *hpp) \ +{ \ + int len = _min_width; \ + \ + return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \ } -static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "guest sys"); +#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +static int hpp__width_##_type(struct perf_hpp *hpp __maybe_unused) \ +{ \ + int len = _min_width; \ + \ + return len; \ } -static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused) -{ - return 9; +#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +static u64 he_get_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int hpp__color_##_type(struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + return __hpp__percent_fmt(hpp, he, he_get_##_field, " %6.2f%%", \ + (hpp_snprint_fn)percent_color_snprintf); \ } -static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period; - - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); +#define __HPP_ENTRY_PERCENT_FN(_type, _field) \ +static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \ + return __hpp__percent_fmt(hpp, he, he_get_##_field, fmt, \ + scnprintf); \ } -static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; - - return scnprintf(hpp->buf, hpp->size, fmt, percent); +#define __HPP_ENTRY_RAW_FN(_type, _field) \ +static u64 he_get_raw_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + const char *fmt = symbol_conf.field_sep ? 
" %"PRIu64 : " %11"PRIu64; \ + return __hpp__raw_fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf); \ } -static int hpp__header_overhead_guest_us(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "guest usr"); -} +#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \ +__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +__HPP_COLOR_PERCENT_FN(_type, _field) \ +__HPP_ENTRY_PERCENT_FN(_type, _field) -static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) -{ - return 9; -} +#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \ +__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +__HPP_ENTRY_RAW_FN(_type, _field) -static int hpp__color_overhead_guest_us(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period; - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); -} +HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8) +HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8) +HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8) +HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8) +HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8) -static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; +HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12) +HPP_RAW_FNS(period, "Period", period, 12, 12) - return scnprintf(hpp->buf, hpp->size, fmt, percent); -} static int hpp__header_baseline(struct perf_hpp *hpp) { @@ -179,7 +135,7 @@ static int hpp__color_baseline(struct perf_hpp *hpp, struct hist_entry *he) { double percent = baseline_percent(he); - if (hist_entry__has_pairs(he)) + if (hist_entry__has_pairs(he) || symbol_conf.field_sep) return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); else return scnprintf(hpp->buf, hpp->size, " "); @@ -196,44 +152,6 @@ static int hpp__entry_baseline(struct perf_hpp *hpp, struct hist_entry *he) return scnprintf(hpp->buf, hpp->size, " "); } -static int hpp__header_samples(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? "%s" : "%11s"; - - return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); -} - -static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused) -{ - return 11; -} - -static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he) -{ - const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64; - - return scnprintf(hpp->buf, hpp->size, fmt, he->stat.nr_events); -} - -static int hpp__header_period(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? "%s" : "%12s"; - - return scnprintf(hpp->buf, hpp->size, fmt, "Period"); -} - -static int hpp__width_period(struct perf_hpp *hpp __maybe_unused) -{ - return 12; -} - -static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he) -{ - const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64; - - return scnprintf(hpp->buf, hpp->size, fmt, he->stat.period); -} - static int hpp__header_period_baseline(struct perf_hpp *hpp) { const char *fmt = symbol_conf.field_sep ? 
"%s" : "%12s"; @@ -254,6 +172,7 @@ static int hpp__entry_period_baseline(struct perf_hpp *hpp, struct hist_entry *h return scnprintf(hpp->buf, hpp->size, fmt, period); } + static int hpp__header_delta(struct perf_hpp *hpp) { const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; @@ -408,9 +327,20 @@ struct perf_hpp_fmt perf_hpp__format[] = { LIST_HEAD(perf_hpp__list); + #undef HPP__COLOR_PRINT_FNS #undef HPP__PRINT_FNS +#undef HPP_PERCENT_FNS +#undef HPP_RAW_FNS + +#undef __HPP_HEADER_FN +#undef __HPP_WIDTH_FN +#undef __HPP_COLOR_PERCENT_FN +#undef __HPP_ENTRY_PERCENT_FN +#undef __HPP_ENTRY_RAW_FN + + void perf_hpp__init(void) { if (symbol_conf.show_cpu_utilization) { -- cgit v1.2.3-59-g8ed1b From 5aed9d24934be5b7fec1b66cc2a5f29fab4ec11e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:35 +0900 Subject: perf hists browser: Convert hpp helpers to a function The hpp helpers do same job for each field so it was implemented as macro in order to access those fields easily. But it gets cumbersome to maintain a large function in a macro as the function grows. Factor it out to a function with a little helper macro to access field. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-8-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/hists.c | 46 +++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 57b82c26cd05..46242deee8d2 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -567,23 +567,41 @@ static int hist_browser__show_callchain(struct hist_browser *browser, return row - first_row; } -#define HPP__COLOR_FN(_name, _field) \ -static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \ - struct hist_entry *he) \ +static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he, + u64 (*get_field)(struct hist_entry *)) +{ + int ret; + double percent = 0.0; + struct hists *hists = he->hists; + + if (hists->stats.total_period) + percent = 100.0 * get_field(he) / hists->stats.total_period; + + *(double *)hpp->ptr = percent; + + ret = scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); + return ret; +} + +#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +static u64 __hpp_get_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp, \ + struct hist_entry *he) \ { \ - struct hists *hists = he->hists; \ - double percent = 100.0 * he->stat._field / hists->stats.total_period; \ - *(double *)hpp->ptr = percent; \ - return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \ + return __hpp__color_fmt(hpp, he, __hpp_get_##_field); \ } -HPP__COLOR_FN(overhead, period) -HPP__COLOR_FN(overhead_sys, period_sys) -HPP__COLOR_FN(overhead_us, period_us) -HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) -HPP__COLOR_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_PERCENT_FN(overhead, period) +__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) +__HPP_COLOR_PERCENT_FN(overhead_us, period_us) +__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) +__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) -#undef HPP__COLOR_FN +#undef __HPP_COLOR_PERCENT_FN void hist_browser__init_hpp(void) { @@ -646,7 +664,7 @@ static int hist_browser__show_entry(struct hist_browser 
*browser, if (fmt->color) { hpp.ptr = &percent; - /* It will set percent for us. See HPP__COLOR_FN above. */ + /* It will set percent for us. See __hpp__color_fmt above. */ width -= fmt->color(&hpp, entry); ui_browser__set_percent_color(&browser->b, percent, current_entry); -- cgit v1.2.3-59-g8ed1b From 843985e953ddcc3d57a62641b377c8d3222859e2 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:36 +0900 Subject: perf gtk/browser: Convert hpp helpers to a function The hpp helpers do same job for each field so it was implemented as macro in order to access those fields easily. But it gets cumbersome to maintain a large function in a macro as the function grows. Factor it out to a function with a little helper macro to access field. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-9-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/hists.c | 68 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index c03da79d524f..38be79d4b5c6 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -8,32 +8,58 @@ #define MAX_COLUMNS 32 -#define HPP__COLOR_FN(_name, _field) \ -static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ - struct hist_entry *he) \ +static int perf_gtk__percent_color_snprintf(char *buf, size_t size, + double percent) +{ + int ret = 0; + const char *markup; + + markup = perf_gtk__get_percent_color(percent); + if (markup) + ret += scnprintf(buf, size, markup); + + ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent); + + if (markup) + ret += scnprintf(buf + ret, size - ret, ""); + + return ret; +} + + +static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he, + u64 (*get_field)(struct hist_entry *)) +{ + int ret; + double percent = 0.0; + struct hists *hists = he->hists; + + if (hists->stats.total_period) + percent = 100.0 * get_field(he) / hists->stats.total_period; + + ret = perf_gtk__percent_color_snprintf(hpp->buf, hpp->size, percent); + return ret; +} + +#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +static u64 he_get_##_field(struct hist_entry *he) \ { \ - struct hists *hists = he->hists; \ - double percent = 100.0 * he->stat._field / hists->stats.total_period; \ - const char *markup; \ - int ret = 0; \ - \ - markup = perf_gtk__get_percent_color(percent); \ - if (markup) \ - ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ - ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ - if (markup) \ - ret += scnprintf(hpp->buf + ret, hpp->size - ret, ""); \ + return he->stat._field; \ +} \ \ - return ret; \ +static int perf_gtk__hpp_color_##_type(struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + return __hpp__color_fmt(hpp, he, he_get_##_field); \ } -HPP__COLOR_FN(overhead, period) -HPP__COLOR_FN(overhead_sys, period_sys) -HPP__COLOR_FN(overhead_us, period_us) -HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) -HPP__COLOR_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_PERCENT_FN(overhead, period) +__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) +__HPP_COLOR_PERCENT_FN(overhead_us, period_us) +__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) +__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) -#undef HPP__COLOR_FN +#undef __HPP_COLOR_PERCENT_FN void 
perf_gtk__init_hpp(void) -- cgit v1.2.3-59-g8ed1b From 5b9e2146ec4f8c6d436f9f7043a0409a4296a705 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:37 +0900 Subject: perf ui/hist: Add support for event group view Show group member's overhead also when showing the leader's if event group is enabled. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-10-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/hist.c | 62 +++++++++++++++++++++++++++++++++++++++++++++- tools/perf/ui/stdio/hist.c | 2 ++ 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 849c40028e7b..a47ce98c2cb1 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -21,6 +21,53 @@ static int __hpp__percent_fmt(struct perf_hpp *hpp, struct hist_entry *he, percent = 100.0 * get_field(he) / hists->stats.total_period; ret = print_fn(hpp->buf, hpp->size, fmt, percent); + + if (symbol_conf.event_group) { + int prev_idx, idx_delta; + struct perf_evsel *evsel = hists_to_evsel(hists); + struct hist_entry *pair; + int nr_members = evsel->nr_members; + + if (nr_members <= 1) + return ret; + + prev_idx = perf_evsel__group_idx(evsel); + + list_for_each_entry(pair, &he->pairs.head, pairs.node) { + u64 period = get_field(pair); + u64 total = pair->hists->stats.total_period; + + if (!total) + continue; + + evsel = hists_to_evsel(pair->hists); + idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members in the middle which + * have no sample + */ + ret += print_fn(hpp->buf + ret, hpp->size - ret, + fmt, 0.0); + } + + ret += print_fn(hpp->buf + ret, hpp->size - ret, + fmt, 100.0 * period / total); + + prev_idx = perf_evsel__group_idx(evsel); + } + + idx_delta = nr_members - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members at last which have no sample + */ + ret += print_fn(hpp->buf + ret, hpp->size - ret, + fmt, 0.0); + } + } return ret; } @@ -40,6 +87,11 @@ static int hpp__header_##_type(struct perf_hpp *hpp) \ { \ int len = _min_width; \ \ + if (symbol_conf.event_group) { \ + struct perf_evsel *evsel = hpp->ptr; \ + \ + len = max(len, evsel->nr_members * _unit_width); \ + } \ return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \ } @@ -48,6 +100,11 @@ static int hpp__width_##_type(struct perf_hpp *hpp __maybe_unused) \ { \ int len = _min_width; \ \ + if (symbol_conf.event_group) { \ + struct perf_evsel *evsel = hpp->ptr; \ + \ + len = max(len, evsel->nr_members * _unit_width); \ + } \ return len; \ } @@ -438,12 +495,15 @@ unsigned int hists__sort_list_width(struct hists *hists) struct perf_hpp_fmt *fmt; struct sort_entry *se; int i = 0, ret = 0; + struct perf_hpp dummy_hpp = { + .ptr = hists_to_evsel(hists), + }; perf_hpp__for_each_format(fmt) { if (i) ret += 2; - ret += fmt->width(NULL); + ret += fmt->width(&dummy_hpp); } list_for_each_entry(se, &hist_entry__sort_list, list) diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index f9798298e3e0..ff1f60cf442e 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -3,6 +3,7 @@ #include "../../util/util.h" #include "../../util/hist.h" #include "../../util/sort.h" +#include "../../util/evsel.h" static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) @@ -347,6 +348,7 @@ size_t hists__fprintf(struct hists *hists, bool 
show_header, int max_rows, struct perf_hpp dummy_hpp = { .buf = bf, .size = sizeof(bf), + .ptr = hists_to_evsel(hists), }; bool first = true; -- cgit v1.2.3-59-g8ed1b From 897014603c4786ef33450e675e02a5e74dc63785 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:38 +0900 Subject: perf hists browser: Move coloring logic to hpp functions Move coloring logic into the hpp functions so that each value can be colored independently. It'd required for event group view. For overhead column, add a callback for printing 'folded_sign' of callchains of a hist entry. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1358845787-1350-11-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/hists.c | 72 ++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 30 deletions(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 46242deee8d2..852e4e10c7c4 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -567,23 +567,48 @@ static int hist_browser__show_callchain(struct hist_browser *browser, return row - first_row; } +struct hpp_arg { + struct ui_browser *b; + char folded_sign; + bool current_entry; +}; + +static int __hpp__color_callchain(struct hpp_arg *arg) +{ + if (!symbol_conf.use_callchain) + return 0; + + slsmg_printf("%c ", arg->folded_sign); + return 2; +} + static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he, - u64 (*get_field)(struct hist_entry *)) + u64 (*get_field)(struct hist_entry *), + int (*callchain_cb)(struct hpp_arg *)) { - int ret; + int ret = 0; double percent = 0.0; struct hists *hists = he->hists; + struct hpp_arg *arg = hpp->ptr; if (hists->stats.total_period) percent = 100.0 * get_field(he) / hists->stats.total_period; - *(double *)hpp->ptr = percent; + ui_browser__set_percent_color(arg->b, percent, arg->current_entry); + + if (callchain_cb) + ret += callchain_cb(arg); + + ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); + slsmg_printf("%s", hpp->buf); + + if (!arg->current_entry || !arg->b->navkeypressed) + ui_browser__set_color(arg->b, HE_COLORSET_NORMAL); - ret = scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); return ret; } -#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +#define __HPP_COLOR_PERCENT_FN(_type, _field, _cb) \ static u64 __hpp_get_##_field(struct hist_entry *he) \ { \ return he->stat._field; \ @@ -592,14 +617,14 @@ static u64 __hpp_get_##_field(struct hist_entry *he) \ static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ - return __hpp__color_fmt(hpp, he, __hpp_get_##_field); \ + return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \ } -__HPP_COLOR_PERCENT_FN(overhead, period) -__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) -__HPP_COLOR_PERCENT_FN(overhead_us, period_us) -__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) -__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain) +__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL) +__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL) +__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL) +__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL) #undef __HPP_COLOR_PERCENT_FN @@ -626,7 +651,6 @@ static int hist_browser__show_entry(struct hist_browser *browser, unsigned short row) { char s[256]; - double percent; int 
printed = 0; int width = browser->b.width; char folded_sign = ' '; @@ -646,16 +670,20 @@ static int hist_browser__show_entry(struct hist_browser *browser, } if (row_offset == 0) { + struct hpp_arg arg = { + .b = &browser->b, + .folded_sign = folded_sign, + .current_entry = current_entry, + }; struct perf_hpp hpp = { .buf = s, .size = sizeof(s), + .ptr = &arg, }; - int i = 0; ui_browser__gotorc(&browser->b, row, 0); perf_hpp__for_each_format(fmt) { - if (!first) { slsmg_printf(" "); width -= 2; } first = false; if (fmt->color) { - hpp.ptr = &percent; - /* It will set percent for us. See __hpp__color_fmt above. */ width -= fmt->color(&hpp, entry); - - ui_browser__set_percent_color(&browser->b, percent, current_entry); - - if (!i && symbol_conf.use_callchain) { - slsmg_printf("%c ", folded_sign); - width -= 2; - } - - slsmg_printf("%s", s); - - if (!current_entry || !browser->b.navkeypressed) - ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); } else { width -= fmt->entry(&hpp, entry); slsmg_printf("%s", s); } - - i++; } /* The scroll bar isn't being used */ -- cgit v1.2.3-59-g8ed1b From 371d8c402e8c3562e913c7fda95094f42fbcf0ef Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:39 +0900 Subject: perf hists browser: Add support for event group view Show group members' overhead also when showing the leader's if event group is enabled. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-12-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/hists.c | 57 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 852e4e10c7c4..d7112dfa89cc 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -602,6 +602,63 @@ static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he, ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); slsmg_printf("%s", hpp->buf); + if (symbol_conf.event_group) { + int prev_idx, idx_delta; + struct perf_evsel *evsel = hists_to_evsel(hists); + struct hist_entry *pair; + int nr_members = evsel->nr_members; + + if (nr_members <= 1) + goto out; + + prev_idx = perf_evsel__group_idx(evsel); + + list_for_each_entry(pair, &he->pairs.head, pairs.node) { + u64 period = get_field(pair); + u64 total = pair->hists->stats.total_period; + + if (!total) + continue; + + evsel = hists_to_evsel(pair->hists); + idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members in the middle which + * have no sample + */ + ui_browser__set_percent_color(arg->b, 0.0, + arg->current_entry); + ret += scnprintf(hpp->buf, hpp->size, + " %6.2f%%", 0.0); + slsmg_printf("%s", hpp->buf); + } + + percent = 100.0 * period / total; + ui_browser__set_percent_color(arg->b, percent, + arg->current_entry); + ret += scnprintf(hpp->buf, hpp->size, + " %6.2f%%", percent); + slsmg_printf("%s", hpp->buf); + + prev_idx = perf_evsel__group_idx(evsel); + } + + idx_delta = nr_members - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members at last which have no sample + */ + ui_browser__set_percent_color(arg->b, 0.0, + arg->current_entry); + ret += scnprintf(hpp->buf, hpp->size, + " %6.2f%%", 0.0); + slsmg_printf("%s", hpp->buf); + } + } 
+out: if (!arg->current_entry || !arg->b->navkeypressed) ui_browser__set_color(arg->b, HE_COLORSET_NORMAL); -- cgit v1.2.3-59-g8ed1b From cb16008bcc6ea53617ca06c9eb4ab0ecafe04585 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:40 +0900 Subject: perf gtk/browser: Add support for event group view Show group members' overhead also when showing the leader's if event group is enabled. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-13-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/hists.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 38be79d4b5c6..c87a98c7efaa 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -8,8 +8,7 @@ #define MAX_COLUMNS 32 -static int perf_gtk__percent_color_snprintf(char *buf, size_t size, - double percent) +static int __percent_color_snprintf(char *buf, size_t size, double percent) { int ret = 0; const char *markup; @@ -18,7 +17,7 @@ static int perf_gtk__percent_color_snprintf(char *buf, size_t size, if (markup) ret += scnprintf(buf, size, markup); - ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent); + ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent); if (markup) ret += scnprintf(buf + ret, size - ret, ""); @@ -37,7 +36,55 @@ static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he, if (hists->stats.total_period) percent = 100.0 * get_field(he) / hists->stats.total_period; - ret = perf_gtk__percent_color_snprintf(hpp->buf, hpp->size, percent); + ret = __percent_color_snprintf(hpp->buf, hpp->size, percent); + + if (symbol_conf.event_group) { + int prev_idx, idx_delta; + struct perf_evsel *evsel = hists_to_evsel(hists); + struct hist_entry *pair; + int nr_members = evsel->nr_members; + + if (nr_members <= 1) + return ret; + + prev_idx = perf_evsel__group_idx(evsel); + + list_for_each_entry(pair, &he->pairs.head, pairs.node) { + u64 period = get_field(pair); + u64 total = pair->hists->stats.total_period; + + evsel = hists_to_evsel(pair->hists); + idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members in the middle which + * have no sample + */ + ret += __percent_color_snprintf(hpp->buf + ret, + hpp->size - ret, + 0.0); + } + + percent = 100.0 * period / total; + ret += __percent_color_snprintf(hpp->buf + ret, + hpp->size - ret, + percent); + + prev_idx = perf_evsel__group_idx(evsel); + } + + idx_delta = nr_members - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members at last which have no sample + */ + ret += __percent_color_snprintf(hpp->buf + ret, + hpp->size - ret, + 0.0); + } + } return ret; } @@ -96,6 +143,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) struct perf_hpp hpp = { .buf = s, .size = sizeof(s), + .ptr = hists_to_evsel(hists), }; nr_cols = 0; -- cgit v1.2.3-59-g8ed1b From 34b9564373a9e1d8c33d07824fae228a381d1803 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:42 +0900 Subject: perf gtk/browser: Trim column header string when event group enabled When event group feature is enabled, each column header is expanded to match the whole group column width. But this is not needed for the GTK+ browser since it usually uses variable-width fonts. 
So trim it. Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-15-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/hists.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index c87a98c7efaa..caa554bdbfe7 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -170,7 +170,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) fmt->header(&hpp); gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, s, + -1, ltrim(s), renderer, "markup", col_idx++, NULL); } -- cgit v1.2.3-59-g8ed1b From fc24d7c25c9d880ae012548d522fe13caee556ed Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:43 +0900 Subject: perf report: Bypass non-leader events when event group is enabled Since we have all necessary information in the leader events and other members don't, bypass members. Member events will be shown along with the leaders if event group is enabled. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-16-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 4 ++++ tools/perf/ui/browsers/hists.c | 41 +++++++++++++++++++++++++++++++++++------ tools/perf/ui/gtk/hists.c | 4 ++++ 3 files changed, 43 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index fd2503a5f876..9cc768ecacad 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -319,6 +319,10 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, struct hists *hists = &pos->hists; const char *evname = perf_evsel__name(pos); + if (symbol_conf.event_group && + !perf_evsel__is_group_leader(pos)) + continue; + hists__fprintf_nr_sample_events(hists, evname, stdout); hists__fprintf(hists, true, 0, 0, stdout); fprintf(stdout, "\n\n"); diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index d7112dfa89cc..167f7701f94e 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1695,8 +1695,19 @@ out: return key; } +static bool filter_group_entries(struct ui_browser *self __maybe_unused, + void *entry) +{ + struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); + + if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel)) + return true; + + return false; +} + static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, - const char *help, + int nr_entries, const char *help, struct hist_browser_timer *hbt, struct perf_session_env *env) { @@ -1707,7 +1718,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = perf_evsel_menu__write, - .nr_entries = evlist->nr_entries, + .filter = filter_group_entries, + .nr_entries = nr_entries, .priv = evlist, }, .env = env, @@ -1723,20 +1735,37 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, menu.b.width = line_len; } - return perf_evsel_menu__run(&menu, evlist->nr_entries, help, hbt); + return perf_evsel_menu__run(&menu, nr_entries, help, hbt); } int perf_evlist__tui_browse_hists(struct perf_evlist 
*evlist, const char *help, struct hist_browser_timer *hbt, struct perf_session_env *env) { - if (evlist->nr_entries == 1) { + int nr_entries = evlist->nr_entries; + +single_entry: + if (nr_entries == 1) { struct perf_evsel *first = list_entry(evlist->entries.next, struct perf_evsel, node); const char *ev_name = perf_evsel__name(first); - return perf_evsel__hists_browse(first, evlist->nr_entries, help, + + return perf_evsel__hists_browse(first, nr_entries, help, ev_name, false, hbt, env); } - return __perf_evlist__tui_browse_hists(evlist, help, hbt, env); + if (symbol_conf.event_group) { + struct perf_evsel *pos; + + nr_entries = 0; + list_for_each_entry(pos, &evlist->entries, node) + if (perf_evsel__is_group_leader(pos)) + nr_entries++; + + if (nr_entries == 1) + goto single_entry; + } + + return __perf_evlist__tui_browse_hists(evlist, nr_entries, help, + hbt, env); } diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index caa554bdbfe7..9c02c4c0f3b7 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -271,6 +271,10 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, GtkWidget *scrolled_window; GtkWidget *tab_label; + if (symbol_conf.event_group && + !perf_evsel__is_group_leader(pos)) + continue; + scrolled_window = gtk_scrolled_window_new(NULL, NULL); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), -- cgit v1.2.3-59-g8ed1b From 717e263fc354d53d0961e952b779d14a42c8ea66 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:44 +0900 Subject: perf report: Show group description when event group is enabled When using event group viewer, it's better to show the group description rather than the leader information alone. If a leader did not contain any member, it's a non-group event. 
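To make the format concrete, here is a standalone toy sketch of the description string built by the perf_evsel__group_desc() helper added below: "<group name> { leader, member, ... }", with "anon group" as the fallback for unnamed groups. The event names are hard-coded stand-ins, not taken from a real evsel:

	#include <stdio.h>

	int main(void)
	{
		const char *group_name = "anon group";	/* fallback used for unnamed groups */
		const char *events[] = { "ref-cycles", "cycles" };	/* leader comes first */
		int i, n = 2;

		printf("%s {", group_name);
		for (i = 0; i < n; i++)
			printf("%s%s", i ? ", " : " ", events[i]);
		printf(" }\n");	/* prints: anon group { ref-cycles, cycles } */
		return 0;
	}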
Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-17-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 15 +++++++++++++++ tools/perf/ui/browsers/hists.c | 25 +++++++++++++++++++++++++ tools/perf/ui/gtk/hists.c | 14 +++++++++++--- tools/perf/util/evsel.c | 25 +++++++++++++++++++++++++ tools/perf/util/evsel.h | 8 ++++++++ 5 files changed, 84 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 9cc768ecacad..ce518c48a525 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -299,6 +299,21 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self, char unit; unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE]; u64 nr_events = self->stats.total_period; + struct perf_evsel *evsel = hists_to_evsel(self); + char buf[512]; + size_t size = sizeof(buf); + + if (symbol_conf.event_group && evsel->nr_members > 1) { + struct perf_evsel *pos; + + perf_evsel__group_desc(evsel, buf, size); + evname = buf; + + for_each_group_member(pos, evsel) { + nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; + nr_events += pos->hists.stats.total_period; + } + } nr_samples = convert_unit(nr_samples, &unit); ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit); diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 167f7701f94e..20ccd57753f7 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1189,6 +1189,21 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size, const struct thread *thread = hists->thread_filter; unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; u64 nr_events = hists->stats.total_period; + struct perf_evsel *evsel = hists_to_evsel(hists); + char buf[512]; + size_t buflen = sizeof(buf); + + if (symbol_conf.event_group && evsel->nr_members > 1) { + struct perf_evsel *pos; + + perf_evsel__group_desc(evsel, buf, buflen); + ev_name = buf; + + for_each_group_member(pos, evsel) { + nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; + nr_events += pos->hists.stats.total_period; + } + } nr_samples = convert_unit(nr_samples, &unit); printed = scnprintf(bf, size, @@ -1585,6 +1600,16 @@ static void perf_evsel_menu__write(struct ui_browser *browser, ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : HE_COLORSET_NORMAL); + if (symbol_conf.event_group && evsel->nr_members > 1) { + struct perf_evsel *pos; + + ev_name = perf_evsel__group_name(evsel); + + for_each_group_member(pos, evsel) { + nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; + } + } + nr_events = convert_unit(nr_events, &unit); printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, unit, unit == ' ' ? 
"" : " ", ev_name); diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 9c02c4c0f3b7..1e764a8ad259 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -270,10 +270,18 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *evname = perf_evsel__name(pos); GtkWidget *scrolled_window; GtkWidget *tab_label; + char buf[512]; + size_t size = sizeof(buf); - if (symbol_conf.event_group && - !perf_evsel__is_group_leader(pos)) - continue; + if (symbol_conf.event_group) { + if (!perf_evsel__is_group_leader(pos)) + continue; + + if (pos->nr_members > 1) { + perf_evsel__group_desc(pos, buf, size); + evname = buf; + } + } scrolled_window = gtk_scrolled_window_new(NULL, NULL); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index baa26ddbcc7b..94e74043ded8 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -434,6 +434,31 @@ const char *perf_evsel__name(struct perf_evsel *evsel) return evsel->name ?: "unknown"; } +const char *perf_evsel__group_name(struct perf_evsel *evsel) +{ + return evsel->group_name ?: "anon group"; +} + +int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) +{ + int ret; + struct perf_evsel *pos; + const char *group_name = perf_evsel__group_name(evsel); + + ret = scnprintf(buf, size, "%s", group_name); + + ret += scnprintf(buf + ret, size - ret, " { %s", + perf_evsel__name(evsel)); + + for_each_group_member(pos, evsel) + ret += scnprintf(buf + ret, size - ret, ", %s", + perf_evsel__name(pos)); + + ret += scnprintf(buf + ret, size - ret, " }"); + + return ret; +} + /* * The enable_on_exec/disabled value strategy: * diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 371713977888..8512f6a8a6ea 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -114,6 +114,8 @@ extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX]; int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size); const char *perf_evsel__name(struct perf_evsel *evsel); +const char *perf_evsel__group_name(struct perf_evsel *evsel); +int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size); int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); @@ -267,4 +269,10 @@ static inline int perf_evsel__group_idx(struct perf_evsel *evsel) { return evsel->idx - evsel->leader->idx; } + +#define for_each_group_member(_evsel, _leader) \ +for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \ + (_evsel) && (_evsel)->leader == (_leader); \ + (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node)) + #endif /* __PERF_EVSEL_H */ -- cgit v1.2.3-59-g8ed1b From 01d14f1615dfe1c6d040541501445967ac716009 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:45 +0900 Subject: perf report: Add --group option Add --group option to enable event grouping. When enabled, all the group members information will be shown together with the leader. $ perf report --group ... # group: {ref-cycles,cycles} # ======== # # Samples: 7K of event 'anon group { ref-cycles, cycles }' # Event count (approx.): 6876107743 # # Overhead Command Shared Object Symbol # ................ ....... ................. .......................... # 99.84% 99.76% noploop noploop [.] main 0.07% 0.00% noploop ld-2.15.so [.] 
strcmp 0.03% 0.00% noploop [kernel.kallsyms] [k] timerqueue_del 0.03% 0.03% noploop [kernel.kallsyms] [k] sched_clock_cpu 0.02% 0.00% noploop [kernel.kallsyms] [k] account_user_time 0.01% 0.00% noploop [kernel.kallsyms] [k] __alloc_pages_nodemask 0.00% 0.00% noploop [kernel.kallsyms] [k] native_write_msr_safe 0.00% 0.11% noploop [kernel.kallsyms] [k] _raw_spin_lock 0.00% 0.06% noploop [kernel.kallsyms] [k] find_get_page 0.00% 0.02% noploop [kernel.kallsyms] [k] rcu_check_callbacks 0.00% 0.02% noploop [kernel.kallsyms] [k] __current_kernel_time Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-18-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-report.txt | 3 +++ tools/perf/builtin-report.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 848a0dcb6dfd..02284a0067f0 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -203,6 +203,9 @@ OPTIONS --objdump=:: Path to objdump binary. +--group:: + Show event group information together. + SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-annotate[1] diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index ce518c48a525..9ef38424f7c3 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -668,6 +668,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) "Specify disassembler style (e.g. -M intel for intel syntax)"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), + OPT_BOOLEAN(0, "group", &symbol_conf.event_group, + "Show event group information together"), OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", "use branch records for histogram filling", parse_branch_mode), OPT_STRING(0, "objdump", &objdump_path, "path", -- cgit v1.2.3-59-g8ed1b From 00c7e1f10c6b8ae8a031f5c6a58ecd15d20c52cb Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:46 +0900 Subject: perf report: Add report.group config option Add report.group config option for setting default value of event group view. It affects the report output only if perf.data contains event group info. 
A user can write .perfconfig file like below to enable group view by default: $ cat ~/.perfconfig [report] group = true And it can be disabled through command line: $ perf report --no-group Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-19-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 9ef38424f7c3..0d221870561a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -8,6 +8,7 @@ #include "builtin.h" #include "util/util.h" +#include "util/cache.h" #include "util/annotate.h" #include "util/color.h" @@ -54,6 +55,16 @@ struct perf_report { DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; +static int perf_report_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "report.group")) { + symbol_conf.event_group = perf_config_bool(var, value); + return 0; + } + + return perf_default_config(var, value, cb); +} + static int perf_report__add_branch_hist_entry(struct perf_tool *tool, struct addr_location *al, struct perf_sample *sample, @@ -677,6 +688,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) OPT_END() }; + perf_config(perf_report_config, NULL); + argc = parse_options(argc, argv, options, report_usage, 0); if (report.use_stdio) -- cgit v1.2.3-59-g8ed1b From e6ab07d027d47e55d8a5c0f33b16dfdd3e18c96f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:47 +0900 Subject: perf evlist: Add --group option Add '-g/--group' option for showing event groups. For simplicity it is currently not compatible with other options. $ perf evlist --group {ref-cycles,cycles} $ perf evlist ref-cycles cycles Suggested-by: Arnaldo Carvalho de Melo Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1358845787-1350-20-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-evlist.txt | 4 ++++ tools/perf/builtin-evlist.c | 7 +++++++ tools/perf/util/evsel.c | 24 ++++++++++++++++++++++-- 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt index 15217345c2fa..1ceb3700ffbb 100644 --- a/tools/perf/Documentation/perf-evlist.txt +++ b/tools/perf/Documentation/perf-evlist.txt @@ -28,6 +28,10 @@ OPTIONS --verbose=:: Show all fields. +-g:: +--group:: + Show event group information. 
+ SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-list[1], diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index 1312a5e03ec7..85a5e35dd147 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c @@ -39,6 +39,8 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"), OPT_BOOLEAN('v', "verbose", &details.verbose, "Show all event attr details"), + OPT_BOOLEAN('g', "group", &symbol_conf.event_group, + "Show event group information"), OPT_END() }; const char * const evlist_usage[] = { @@ -50,5 +52,10 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) if (argc) usage_with_options(evlist_usage, options); + if (symbol_conf.event_group && (details.verbose || details.freq)) { + pr_err("--group option is not compatible with other options\n"); + usage_with_options(evlist_usage, options); + } + return __cmd_evlist(input_name, &details); } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 94e74043ded8..a54701504606 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1389,7 +1389,27 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, struct perf_attr_details *details, FILE *fp) { bool first = true; - int printed = fprintf(fp, "%s", perf_evsel__name(evsel)); + int printed = 0; + + if (symbol_conf.event_group) { + struct perf_evsel *pos; + + if (!perf_evsel__is_group_leader(evsel)) + return 0; + + if (evsel->nr_members > 1) + printed += fprintf(fp, "%s{", evsel->group_name ?: ""); + + printed += fprintf(fp, "%s", perf_evsel__name(evsel)); + for_each_group_member(pos, evsel) + printed += fprintf(fp, ",%s", perf_evsel__name(pos)); + + if (evsel->nr_members > 1) + printed += fprintf(fp, "}"); + goto out; + } + + printed += fprintf(fp, "%s", perf_evsel__name(evsel)); if (details->verbose || details->freq) { printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, @@ -1430,7 +1450,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, if_print(bp_type); if_print(branch_sample_type); } - +out: fputc('\n', fp); return ++printed; } -- cgit v1.2.3-59-g8ed1b From bbdc7aa442630a84feae45f4ca2dd7ed01abc868 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 22 Jan 2013 22:23:53 -0800 Subject: perf/Power7: Use macros to identify perf events Define and use macros to identify perf events codes This would make it easier and more readable when these event codes need to be used in more than one place. Signed-off-by: Sukadev Bhattiprolu Acked-by: Jiri Olsa Cc: Andi Kleen Cc: Anton Blanchard Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130123062353.GB13720@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/perf/power7-pmu.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 2ee01e38d5e2..eebb36de429f 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -50,6 +50,18 @@ #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0xff +/* + * Power7 event codes. 
+ */ +#define PME_PM_CYC 0x1e +#define PME_PM_GCT_NOSLOT_CYC 0x100f8 +#define PME_PM_CMPLU_STALL 0x4000a +#define PME_PM_INST_CMPL 0x2 +#define PME_PM_LD_REF_L1 0xc880 +#define PME_PM_LD_MISS_L1 0x400f0 +#define PME_PM_BRU_FIN 0x10068 +#define PME_PM_BRU_MPRED 0x400f6 + /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 @@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) } static int power7_generic_events[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ - [PERF_COUNT_HW_INSTRUCTIONS] = 2, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ - [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ - [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ + [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL, + [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN, + [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED, }; #define C(x) PERF_COUNT_HW_CACHE_##x -- cgit v1.2.3-59-g8ed1b From 2663960c159f23cbfb8e196c96e9fc9f3b5f1a8d Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 22 Jan 2013 22:24:23 -0800 Subject: perf: Make EVENT_ATTR global Rename EVENT_ATTR() to PMU_EVENT_ATTR() and make it global so it is available to all architectures. Further to allow architectures flexibility, have PMU_EVENT_ATTR() pass in the variable name as a parameter. Changelog[v2] - [Jiri Olsa] No need to define PMU_EVENT_PTR() Signed-off-by: Sukadev Bhattiprolu Acked-by: Jiri Olsa Cc: Andi Kleen Cc: Anton Blanchard Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130123062422.GC13720@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- arch/x86/kernel/cpu/perf_event.c | 13 +++---------- include/linux/perf_event.h | 11 +++++++++++ 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 6774c17a5576..c0df5ed2e048 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = { .attrs = NULL, }; -struct perf_pmu_events_attr { - struct device_attribute attr; - u64 id; -}; - /* * Remove all undefined events (x86_pmu.event_map(id) == 0) * out of events_attr attributes. 
@@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr -#define EVENT_ATTR(_name, _id) \ -static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ - .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ - .id = PERF_COUNT_HW_##_id, \ -}; +#define EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \ + events_sysfs_show) EVENT_ATTR(cpu-cycles, CPU_CYCLES ); EVENT_ATTR(instructions, INSTRUCTIONS ); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6bfb2faa0b19..42adf012145d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -817,6 +817,17 @@ do { \ } while (0) +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; +}; + +#define PMU_EVENT_ATTR(_name, _var, _id, _show) \ +static struct perf_pmu_events_attr _var = { \ + .attr = __ATTR(_name, 0444, _show, NULL), \ + .id = _id, \ +}; + #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ -- cgit v1.2.3-59-g8ed1b From 1c53a270724df91276d28d66f8e5a302fc6a5d74 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 22 Jan 2013 22:24:54 -0800 Subject: perf/POWER7: Make generic event translations available in sysfs Make the generic perf events in POWER7 available via sysfs. $ ls /sys/bus/event_source/devices/cpu/events branch-instructions branch-misses cache-misses cache-references cpu-cycles instructions stalled-cycles-backend stalled-cycles-frontend $ cat /sys/bus/event_source/devices/cpu/events/cache-misses event=0x400f0 This patch is based on commits that implement this functionality on x86. Eg: commit a47473939db20e3961b200eb00acf5fcf084d755 Author: Jiri Olsa Date: Wed Oct 10 14:53:11 2012 +0200 perf/x86: Make hardware event translations available in sysfs Changelog[v2]: [Jiri Olsa] Drop EVENT_ID() macro since it is only used once.
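For reference, the alias files exported by such an events attr group can be consumed from user space with a few lines of C. This is only a sketch: the /sys/devices/cpu path assumes the core PMU, and any alias not exported by the running kernel is simply skipped.

#include <stdio.h>

int main(void)
{
        const char *aliases[] = { "cpu-cycles", "instructions", "cache-misses" };
        char path[256], line[64];
        unsigned int i;

        for (i = 0; i < sizeof(aliases) / sizeof(aliases[0]); i++) {
                FILE *f;

                snprintf(path, sizeof(path), "/sys/devices/cpu/events/%s",
                         aliases[i]);
                f = fopen(path, "r");
                if (!f)
                        continue; /* alias not exported by this kernel/PMU */
                if (fgets(line, sizeof(line), f))
                        printf("%-16s %s", aliases[i], line); /* e.g. event=0x400f0 */
                fclose(f);
        }
        return 0;
}
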
Signed-off-by: Sukadev Bhattiprolu Cc: Andi Kleen Cc: Anton Blanchard Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130123062454.GD13720@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- Documentation/ABI/stable/sysfs-devices-cpu-events | 0 arch/powerpc/include/asm/perf_event_server.h | 23 +++++++++++++++ arch/powerpc/perf/core-book3s.c | 12 ++++++++ arch/powerpc/perf/power7-pmu.c | 34 +++++++++++++++++++++++ 4 files changed, 69 insertions(+) create mode 100644 Documentation/ABI/stable/sysfs-devices-cpu-events diff --git a/Documentation/ABI/stable/sysfs-devices-cpu-events b/Documentation/ABI/stable/sysfs-devices-cpu-events new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 9710be3a2d17..b9b6c557bd0c 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -11,6 +11,7 @@ #include #include +#include #define MAX_HWEVENTS 8 #define MAX_EVENT_ALTERNATIVES 8 @@ -35,6 +36,7 @@ struct power_pmu { void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); int (*limited_pmc_event)(u64 event_id); u32 flags; + const struct attribute_group **attr_groups; int n_generic; int *generic_events; int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] @@ -109,3 +111,24 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); * If an event_id is not subject to the constraint expressed by a particular * field, then it will have 0 in both the mask and value for that field. */ + +extern ssize_t power_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page); + +/* + * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix. + * + * Having a suffix allows us to have aliases in sysfs - eg: the generic + * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and + * 'PM_CYC' where the latter is the name by which the event is known in + * POWER CPU specification. + */ +#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix +#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix) + +#define EVENT_ATTR(_name, _id, _suffix) \ + PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ + power_events_sysfs_show) + +#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) +#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index aa2465e21f1a..fa476d50791f 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event) return event->hw.idx; } +ssize_t power_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + struct pmu power_pmu = { .pmu_enable = power_pmu_enable, .pmu_disable = power_pmu_disable, @@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu) pr_info("%s performance monitor hardware support registered\n", pmu->name); + power_pmu.attr_groups = ppmu->attr_groups; + #ifdef MSR_HV /* * Use FCHV to ignore kernel events if MSR.HV is set. 
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index eebb36de429f..269bf2464a36 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -374,6 +374,39 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { }, }; + +GENERIC_EVENT_ATTR(cpu-cycles, CYC); +GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); +GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); +GENERIC_EVENT_ATTR(instructions, INST_CMPL); +GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); +GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); +GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); +GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); + +static struct attribute *power7_events_attr[] = { + GENERIC_EVENT_PTR(CYC), + GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), + GENERIC_EVENT_PTR(CMPLU_STALL), + GENERIC_EVENT_PTR(INST_CMPL), + GENERIC_EVENT_PTR(LD_REF_L1), + GENERIC_EVENT_PTR(LD_MISS_L1), + GENERIC_EVENT_PTR(BRU_FIN), + GENERIC_EVENT_PTR(BRU_MPRED), + NULL +}; + + +static struct attribute_group power7_pmu_events_group = { + .name = "events", + .attrs = power7_events_attr, +}; + +static const struct attribute_group *power7_pmu_attr_groups[] = { + &power7_pmu_events_group, + NULL, +}; + static struct power_pmu power7_pmu = { .name = "POWER7", .n_counter = 6, @@ -385,6 +418,7 @@ static struct power_pmu power7_pmu = { .get_alternatives = power7_get_alternatives, .disable_pmc = power7_disable_pmc, .flags = PPMU_ALT_SIPR, + .attr_groups = power7_pmu_attr_groups, .n_generic = ARRAY_SIZE(power7_generic_events), .generic_events = power7_generic_events, .cache_events = &power7_cache_events, -- cgit v1.2.3-59-g8ed1b From 886c3b2d677fe248cce8101fa66a1b3e05c3ba16 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 22 Jan 2013 22:25:29 -0800 Subject: perf/POWER7: Make some POWER7 events available in sysfs Make some POWER7-specific perf events available in sysfs. $ /bin/ls -1 /sys/bus/event_source/devices/cpu/events/ branch-instructions branch-misses cache-misses cache-references cpu-cycles instructions PM_BRU_FIN PM_BRU_MPRED PM_CMPLU_STALL PM_CYC PM_GCT_NOSLOT_CYC PM_INST_CMPL PM_LD_MISS_L1 PM_LD_REF_L1 stalled-cycles-backend stalled-cycles-frontend where the 'PM_*' events are POWER specific and the others are the generic events. This will enable users to specify these events with their symbolic names rather than with their raw code. perf stat -e 'cpu/PM_CYC' ... 
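The generic/POWER-specific aliasing described above rests entirely on token pasting of the _g/_p suffixes. A stripped-down user-space sketch of the same idiom, with a mock structure standing in for struct perf_pmu_events_attr and only the PME_PM_CYC code carried over, shows how a single event code can back two sysfs names:

#include <stdio.h>

/* Mock stand-in for struct perf_pmu_events_attr, for illustration only. */
struct mock_event_attr {
        const char *name;
        unsigned long long id;
};

#define PME_PM_CYC 0x1e

#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
#define EVENT_ATTR(_name, _id, _suffix)                          \
        static struct mock_event_attr EVENT_VAR(_id, _suffix) = \
                { .name = #_name, .id = PME_PM_##_id }

/* One event code, two names: a generic alias and the POWER name. */
EVENT_ATTR(cpu-cycles, CYC, _g);
EVENT_ATTR(PM_CYC, CYC, _p);

int main(void)
{
        printf("%s -> event=0x%llx\n", EVENT_VAR(CYC, _g).name,
               EVENT_VAR(CYC, _g).id);
        printf("%s -> event=0x%llx\n", EVENT_VAR(CYC, _p).name,
               EVENT_VAR(CYC, _p).id);
        return 0;
}

Because the suffix is pasted into the variable name, both attributes coexist in one attribute array without colliding, which is exactly what lets 'cpu-cycles' and 'PM_CYC' share the 0x1e code.
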
Signed-off-by: Sukadev Bhattiprolu Cc: Andi Kleen Cc: Anton Blanchard Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130123062528.GE13720@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- arch/powerpc/include/asm/perf_event_server.h | 3 +++ arch/powerpc/perf/power7-pmu.c | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index b9b6c557bd0c..b29fcc651601 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -132,3 +132,6 @@ extern ssize_t power_events_sysfs_show(struct device *dev, #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) + +#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p) +#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 269bf2464a36..b554879bd31e 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -384,6 +384,15 @@ GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); +POWER_EVENT_ATTR(CYC, CYC); +POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); +POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); +POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); +POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); +POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); +POWER_EVENT_ATTR(BRU_FIN, BRU_FIN); +POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED); + static struct attribute *power7_events_attr[] = { GENERIC_EVENT_PTR(CYC), GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), @@ -393,6 +402,15 @@ static struct attribute *power7_events_attr[] = { GENERIC_EVENT_PTR(LD_MISS_L1), GENERIC_EVENT_PTR(BRU_FIN), GENERIC_EVENT_PTR(BRU_MPRED), + + POWER_EVENT_PTR(CYC), + POWER_EVENT_PTR(GCT_NOSLOT_CYC), + POWER_EVENT_PTR(CMPLU_STALL), + POWER_EVENT_PTR(INST_CMPL), + POWER_EVENT_PTR(LD_REF_L1), + POWER_EVENT_PTR(LD_MISS_L1), + POWER_EVENT_PTR(BRU_FIN), + POWER_EVENT_PTR(BRU_MPRED), NULL }; -- cgit v1.2.3-59-g8ed1b From 2ac3634a7e1c8eedc961030c87c5c36ebd5bbf8e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 22 Jan 2013 22:26:45 -0800 Subject: perf: Document the ABI of perf sysfs entries This patchset adds two new sets of files to sysfs for the POWER architecture. - perf event config format in /sys/devices/cpu/format/event - generic and POWER-specific perf events in /sys/devices/cpu/events/ The format of the first file is already documented in: sysfs-bus-event_source-devices-format Document the format of the second set of files '/sys/devices/cpu/events/*' which would also become part of the ABI. Changelog[v4]: [Jiri Olsa]: Mention that multiple event= like terms can be specified in the 'events' file. [Jiri Olsa]: Remove the documentation for the 'config format' file as it is already documented in 'Documentation/ABI/testing/'. [Jiri Olsa]: Move ABI documentation from 'stable/' to 'testing/' Changelog[v3]: [Greg KH] Include ABI documentation.
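As a rough illustration of the consumer side (not perf's actual parser), the documented 'event=0xNNNN' contents, optionally with several comma-separated terms, can be parsed along these lines; term names other than "event" would come from the PMU's format directory:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a line like "event=0x400f0"; comma-separated terms parse the same way. */
static int parse_event_terms(const char *line)
{
        char buf[128], *term, *save;

        snprintf(buf, sizeof(buf), "%s", line);
        buf[strcspn(buf, "\n")] = '\0';

        for (term = strtok_r(buf, ",", &save); term;
             term = strtok_r(NULL, ",", &save)) {
                char *eq = strchr(term, '=');

                if (!eq)
                        return -1; /* malformed term */
                *eq = '\0';
                printf("term '%s' = 0x%llx\n", term,
                       (unsigned long long)strtoull(eq + 1, NULL, 16));
        }
        return 0;
}

int main(void)
{
        return parse_event_terms("event=0x400f0\n");
}
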
Signed-off-by: Sukadev Bhattiprolu Acked-by: Jiri Olsa Cc: Andi Kleen Cc: Anton Blanchard Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130123062645.GG13720@us.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- Documentation/ABI/stable/sysfs-devices-cpu-events | 0 .../testing/sysfs-bus-event_source-devices-events | 62 ++++++++++++++++++++++ 2 files changed, 62 insertions(+) delete mode 100644 Documentation/ABI/stable/sysfs-devices-cpu-events create mode 100644 Documentation/ABI/testing/sysfs-bus-event_source-devices-events diff --git a/Documentation/ABI/stable/sysfs-devices-cpu-events b/Documentation/ABI/stable/sysfs-devices-cpu-events deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events new file mode 100644 index 000000000000..0adeb524c0d4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events @@ -0,0 +1,62 @@ +What: /sys/devices/cpu/events/ + /sys/devices/cpu/events/branch-misses + /sys/devices/cpu/events/cache-references + /sys/devices/cpu/events/cache-misses + /sys/devices/cpu/events/stalled-cycles-frontend + /sys/devices/cpu/events/branch-instructions + /sys/devices/cpu/events/stalled-cycles-backend + /sys/devices/cpu/events/instructions + /sys/devices/cpu/events/cpu-cycles + +Date: 2013/01/08 + +Contact: Linux kernel mailing list + +Description: Generic performance monitoring events + + A collection of performance monitoring events that may be + supported by many/most CPUs. These events can be monitored + using the 'perf(1)' tool. + + The contents of each file would look like: + + event=0xNNNN + + where 'N' is a hex digit and the number '0xNNNN' shows the + "raw code" for the perf event identified by the file's + "basename". + + +What: /sys/devices/cpu/events/PM_LD_MISS_L1 + /sys/devices/cpu/events/PM_LD_REF_L1 + /sys/devices/cpu/events/PM_CYC + /sys/devices/cpu/events/PM_BRU_FIN + /sys/devices/cpu/events/PM_GCT_NOSLOT_CYC + /sys/devices/cpu/events/PM_BRU_MPRED + /sys/devices/cpu/events/PM_INST_CMPL + /sys/devices/cpu/events/PM_CMPLU_STALL + +Date: 2013/01/08 + +Contact: Linux kernel mailing list + Linux Powerpc mailing list + +Description: POWER-systems specific performance monitoring events + + A collection of performance monitoring events that may be + supported by the POWER CPU. These events can be monitored + using the 'perf(1)' tool. + + These events may not be supported by other CPUs. + + The contents of each file would look like: + + event=0xNNNN + + where 'N' is a hex digit and the number '0xNNNN' shows the + "raw code" for the perf event identified by the file's + "basename". + + Further, multiple terms like 'event=0xNNNN' can be specified + and separated with comma. All available terms are defined in + the /sys/bus/event_source/devices//format file. -- cgit v1.2.3-59-g8ed1b From d840f718d28715a9833c1a8f46c2493ff3fd219b Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 1 Feb 2013 18:38:47 -0500 Subject: tracing: Init current_trace to nop_trace and remove NULL checks On early boot up, when the ftrace ring buffer is initialized, the static variable current_trace is initialized to &nop_trace. Before this initialization, current_trace is NULL and will never become NULL again. It is always reassigned to a ftrace tracer. 
Several places check if current_trace is NULL before it uses it, and this check is frivolous, because at the point in time when the checks are made the only way current_trace could be NULL is if ftrace failed its allocations at boot up, and the paths to these locations would probably not be possible. By initializing current_trace to &nop_trace where it is declared, current_trace will never be NULL, and we can remove all these checks of current_trace being NULL which never needed to be checked in the first place. Cc: Dan Carpenter Cc: Hiraku Toyooka Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 70dce64b9ecf..5d520b7bb4c5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -249,7 +249,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; static struct tracer *trace_types __read_mostly; /* current_trace points to the tracer that is currently active */ -static struct tracer *current_trace __read_mostly; +static struct tracer *current_trace __read_mostly = &nop_trace; /* * trace_types_lock is used to protect the trace_types list. @@ -2100,8 +2100,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) unsigned long total; const char *name = "preemption"; - if (type) - name = type->name; + name = type->name; get_total_entries(tr, &total, &entries); @@ -2477,13 +2476,12 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) if (!iter->trace) goto fail; - if (current_trace) - *iter->trace = *current_trace; + *iter->trace = *current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; - if ((current_trace && current_trace->print_max) || snapshot) + if (current_trace->print_max || snapshot) iter->tr = &max_tr; else iter->tr = &global_trace; @@ -3037,10 +3035,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, int r; mutex_lock(&trace_types_lock); - if (current_trace) - r = sprintf(buf, "%s\n", current_trace->name); - else - r = sprintf(buf, "\n"); + r = sprintf(buf, "%s\n", current_trace->name); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); @@ -3231,10 +3226,10 @@ static int tracing_set_tracer(const char *buf) goto out; trace_branch_disable(); - if (current_trace && current_trace->reset) + if (current_trace->reset) current_trace->reset(tr); - had_max_tr = current_trace && current_trace->allocated_snapshot; + had_max_tr = current_trace->allocated_snapshot; current_trace = &nop_trace; if (had_max_tr && !t->use_max_tr) { @@ -3373,8 +3368,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) ret = -ENOMEM; goto fail; } - if (current_trace) - *iter->trace = *current_trace; + *iter->trace = *current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; @@ -3525,7 +3519,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); - if (unlikely(current_trace && iter->trace->name != current_trace->name)) + if (unlikely(iter->trace->name != current_trace->name)) *iter->trace = *current_trace; mutex_unlock(&trace_types_lock); @@ -3691,7 +3685,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); - if (unlikely(current_trace && iter->trace->name != current_trace->name)) + if 
(unlikely(iter->trace->name != current_trace->name)) *iter->trace = *current_trace; mutex_unlock(&trace_types_lock); @@ -4115,7 +4109,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, mutex_lock(&trace_types_lock); - if (current_trace && current_trace->use_max_tr) { + if (current_trace->use_max_tr) { ret = -EBUSY; goto out; } @@ -5299,7 +5293,7 @@ __init static int tracer_alloc_buffers(void) init_irq_work(&trace_work_wakeup, trace_wake_up); register_tracer(&nop_trace); - current_trace = &nop_trace; + /* All seems OK, enable tracing */ tracing_disabled = 0; -- cgit v1.2.3-59-g8ed1b From f2b4367a69c60c644a1df36f63a65e0e677d3b0f Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 5 Feb 2013 15:04:49 -0800 Subject: perf/powerpc: Fix build error Fix compile errors like those below: CC arch/powerpc/perf/power7-pmu.o /home/git/linux/arch/powerpc/perf/power7-pmu.c:397:2: error: initialization from incompatible pointer type [-Werror] Reported-by: Stephen Rothwell Signed-off-by: Sukadev Bhattiprolu Acked-by: Arnaldo Carvalho de Melo Cc: linuxppc-dev@ozlabs.org Link: http://lkml.kernel.org/r/20130205231938.GA24125@us.ibm.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_event_server.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index b29fcc651601..136bba62efa4 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -124,7 +124,7 @@ extern ssize_t power_events_sysfs_show(struct device *dev, * POWER CPU specification. */ #define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix -#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix) +#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr #define EVENT_ATTR(_name, _id, _suffix) \ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ -- cgit v1.2.3-59-g8ed1b From 2c53c3dd0b6497484b29fd49d34ef98acbc14577 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 6 Feb 2013 11:26:24 -0600 Subject: perf/x86/amd: Rework northbridge event constraints handler Code simplification. No functional changes. 
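The heart of the reworked handler is the cmpxchg() that claims a free slot in nb->owners[]. A simplified user-space model of that claiming step, using C11 atomics in place of the kernel's cmpxchg() and ignoring the reassignment-to-preferred-slot logic, looks like this (illustration only, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_COUNTERS 4

/* One slot per shared northbridge counter; NULL means free. */
static _Atomic(void *) owners[NUM_COUNTERS];

/* Claim a slot for 'event'; return its index, or -1 if all are taken. */
static int claim_slot(void *event)
{
        int idx;

        for (idx = 0; idx < NUM_COUNTERS; idx++) {
                void *expected = NULL;

                /* Analogue of cmpxchg(nb->owners + idx, NULL, event). */
                if (atomic_compare_exchange_strong(&owners[idx], &expected,
                                                   event))
                        return idx;
                if (expected == event)
                        return idx; /* already ours: reuse the slot */
        }
        return -1; /* maps to the kernel's emptyconstraint case */
}

int main(void)
{
        int a, b;

        printf("first event got slot %d\n", claim_slot(&a));
        printf("second event got slot %d\n", claim_slot(&b));
        return 0;
}
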
Signed-off-by: Robert Richter Signed-off-by: Jacob Shin Acked-by: Stephane Eranian Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Robert Richter Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360171589-6381-2-git-send-email-jacob.shin@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_amd.c | 68 ++++++++++++++---------------------- 1 file changed, 26 insertions(+), 42 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index c93bc4e813a0..e7963c7af683 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -256,9 +256,8 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; struct amd_nb *nb = cpuc->amd_nb; - struct perf_event *old = NULL; - int max = x86_pmu.num_counters; - int i, j, k = -1; + struct perf_event *old; + int idx, new = -1; /* * if not NB event or no NB, then no constraints @@ -276,48 +275,33 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) * because of successive calls to x86_schedule_events() from * hw_perf_group_sched_in() without hw_perf_enable() */ - for (i = 0; i < max; i++) { - /* - * keep track of first free slot - */ - if (k == -1 && !nb->owners[i]) - k = i; + for (idx = 0; idx < x86_pmu.num_counters; idx++) { + if (new == -1 || hwc->idx == idx) + /* assign free slot, prefer hwc->idx */ + old = cmpxchg(nb->owners + idx, NULL, event); + else if (nb->owners[idx] == event) + /* event already present */ + old = event; + else + continue; + + if (old && old != event) + continue; + + /* reassign to this slot */ + if (new != -1) + cmpxchg(nb->owners + new, event, NULL); + new = idx; /* already present, reuse */ - if (nb->owners[i] == event) - goto done; - } - /* - * not present, so grab a new slot - * starting either at: - */ - if (hwc->idx != -1) { - /* previous assignment */ - i = hwc->idx; - } else if (k != -1) { - /* start from free slot found */ - i = k; - } else { - /* - * event not found, no slot found in - * first pass, try again from the - * beginning - */ - i = 0; - } - j = i; - do { - old = cmpxchg(nb->owners+i, NULL, event); - if (!old) + if (old == event) break; - if (++i == max) - i = 0; - } while (i != j); -done: - if (!old) - return &nb->event_constraints[i]; - - return &emptyconstraint; + } + + if (new == -1) + return &emptyconstraint; + + return &nb->event_constraints[new]; } static struct amd_nb *amd_alloc_nb(int cpu) -- cgit v1.2.3-59-g8ed1b From 4dd4c2ae555d8a91e8c5bf1cd56807a35764436a Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 6 Feb 2013 11:26:25 -0600 Subject: perf/x86/amd: Generalize northbridge constraints code for family 15h Generalize northbridge constraints code for family 10h so that later we can reuse the same code path with other AMD processor families that have the same northbridge event constraints. 
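__amd_get_nb_event_constraints() now scans only the counters permitted by the constraint's index mask, via for_each_set_bit(). For a mask that fits in a single word, that kernel macro walks bits equivalently to this plain-C sketch:

#include <stdio.h>

/* Visit each set bit of 'mask' below 'nbits', lowest bit first. */
static void visit_set_bits(unsigned long mask, unsigned int nbits)
{
        unsigned int idx;

        for (idx = 0; idx < nbits; idx++) {
                if (!(mask & (1UL << idx)))
                        continue;
                printf("counter %u is allowed by this constraint\n", idx);
        }
}

int main(void)
{
        visit_set_bits(0x0b, 6); /* bits 0, 1 and 3 set */
        return 0;
}
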
Signed-off-by: Robert Richter Signed-off-by: Jacob Shin Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Cc: Jiri Olsa Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360171589-6381-3-git-send-email-jacob.shin@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_amd.c | 43 +++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index e7963c7af683..f8c9dfbd6613 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -188,19 +188,12 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc) return nb && nb->nb_id != -1; } -static void amd_put_event_constraints(struct cpu_hw_events *cpuc, - struct perf_event *event) +static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; struct amd_nb *nb = cpuc->amd_nb; int i; - /* - * only care about NB events - */ - if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) - return; - /* * need to scan whole list because event may not have * been assigned during scheduling @@ -247,24 +240,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, * * Given that resources are allocated (cmpxchg), they must be * eventually freed for others to use. This is accomplished by - * calling amd_put_event_constraints(). + * calling __amd_put_nb_event_constraints() * * Non NB events are not impacted by this restriction. */ static struct event_constraint * -amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) +__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, + struct event_constraint *c) { struct hw_perf_event *hwc = &event->hw; struct amd_nb *nb = cpuc->amd_nb; struct perf_event *old; int idx, new = -1; - /* - * if not NB event or no NB, then no constraints - */ - if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) - return &unconstrained; - /* * detect if already present, if so reuse * @@ -275,7 +263,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) * because of successive calls to x86_schedule_events() from * hw_perf_group_sched_in() without hw_perf_enable() */ - for (idx = 0; idx < x86_pmu.num_counters; idx++) { + for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { if (new == -1 || hwc->idx == idx) /* assign free slot, prefer hwc->idx */ old = cmpxchg(nb->owners + idx, NULL, event); @@ -391,6 +379,25 @@ static void amd_pmu_cpu_dead(int cpu) } } +static struct event_constraint * +amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) +{ + /* + * if not NB event or no NB, then no constraints + */ + if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) + return &unconstrained; + + return __amd_get_nb_event_constraints(cpuc, event, &unconstrained); +} + +static void amd_put_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) + __amd_put_nb_event_constraints(cpuc, event); +} + PMU_FORMAT_ATTR(event, "config:0-7,32-35"); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); -- cgit v1.2.3-59-g8ed1b From 9f19010af8c651879ac2c36f1a808a3a4419cd40 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Wed, 6 Feb 2013 11:26:26 -0600 Subject: perf/x86/amd: Use proper naming scheme for AMD bit field definitions Update these AMD bit field names to be consistent with naming 
convention followed by the rest of the file. Signed-off-by: Jacob Shin Acked-by: Stephane Eranian Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360171589-6381-4-git-send-email-jacob.shin@amd.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 4 ++-- arch/x86/kernel/cpu/perf_event_amd.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 4fabcdf1cfa7..2234eaaecb52 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -29,8 +29,8 @@ #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL -#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) -#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) +#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40) +#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41) #define AMD64_EVENTSEL_EVENT \ (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index f8c9dfbd6613..aea8c2021f78 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -156,9 +156,9 @@ static int amd_pmu_hw_config(struct perf_event *event) event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS); else if (event->attr.exclude_host) - event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; + event->hw.config |= AMD64_EVENTSEL_GUESTONLY; else if (event->attr.exclude_guest) - event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; + event->hw.config |= AMD64_EVENTSEL_HOSTONLY; if (event->attr.type != PERF_TYPE_RAW) return 0; @@ -336,7 +336,7 @@ static void amd_pmu_cpu_starting(int cpu) struct amd_nb *nb; int i, nb_id; - cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; + cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; if (boot_cpu_data.x86_max_cores < 2) return; @@ -669,7 +669,7 @@ void amd_pmu_disable_virt(void) * SVM is disabled the Guest-only bits still gets set and the counter * will not count anything. */ - cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; + cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; /* Reload all events */ x86_pmu_disable_all(); -- cgit v1.2.3-59-g8ed1b From 4c1fd17a1cb32bc4f429c7a5ff9a91a3bffdb8fa Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Wed, 6 Feb 2013 11:26:27 -0600 Subject: perf/x86: Move MSR address offset calculation to architecture specific files Move counter index to MSR address offset calculation to architecture specific files. This prepares the way for perf_event_amd to enable counter addresses that are not contiguous -- for example AMD Family 15h processors have 6 core performance counters starting at 0xc0010200 and 4 northbridge performance counters starting at 0xc0010240. 
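The counter layout described above can be sanity-checked with a few lines of C mirroring amd_pmu_addr_offset()'s arithmetic (the offset caching is dropped here for clarity; the two bases match MSR_K7_EVNTSEL0 and the core-extension base from the changelog):

#include <stdio.h>

/* Same arithmetic as amd_pmu_addr_offset(), without the offset cache. */
static int addr_offset(int index, int has_perfctr_core)
{
        if (!index)
                return 0;
        return has_perfctr_core ? index << 1 : index;
}

int main(void)
{
        const unsigned int legacy_base = 0xc0010000; /* MSR_K7_EVNTSEL0 */
        const unsigned int core_base = 0xc0010200;   /* core extensions */
        int i;

        for (i = 0; i < 4; i++)
                printf("legacy evntsel%d at 0x%x\n", i,
                       legacy_base + addr_offset(i, 0));
        for (i = 0; i < 6; i++)
                printf("core evntsel%d at 0x%x\n", i,
                       core_base + addr_offset(i, 1));
        return 0;
}

The kernel version caches each computed offset in event_offsets[]/count_offsets[] because the helper sits on hot counter-programming paths; the arithmetic itself is the same.
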
Signed-off-by: Jacob Shin Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Cc: Jiri Olsa Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360171589-6381-5-git-send-email-jacob.shin@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.h | 21 +++++------------- arch/x86/kernel/cpu/perf_event_amd.c | 42 ++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 115c1ea97746..a7f06a90d2e7 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -325,6 +325,7 @@ struct x86_pmu { int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); unsigned eventsel; unsigned perfctr; + int (*addr_offset)(int index, bool eventsel); u64 (*event_map)(int); int max_events; int num_counters; @@ -446,28 +447,16 @@ extern u64 __read_mostly hw_cache_extra_regs u64 x86_perf_event_update(struct perf_event *event); -static inline int x86_pmu_addr_offset(int index) -{ - int offset; - - /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ - alternative_io(ASM_NOP2, - "shll $1, %%eax", - X86_FEATURE_PERFCTR_CORE, - "=a" (offset), - "a" (index)); - - return offset; -} - static inline unsigned int x86_pmu_config_addr(int index) { - return x86_pmu.eventsel + x86_pmu_addr_offset(index); + return x86_pmu.eventsel + (x86_pmu.addr_offset ? + x86_pmu.addr_offset(index, true) : index); } static inline unsigned int x86_pmu_event_addr(int index) { - return x86_pmu.perfctr + x86_pmu_addr_offset(index); + return x86_pmu.perfctr + (x86_pmu.addr_offset ? + x86_pmu.addr_offset(index, false) : index); } int x86_setup_perfctr(struct perf_event *event); diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index aea8c2021f78..b60f31caeda0 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -132,6 +132,47 @@ static u64 amd_pmu_event_map(int hw_event) return amd_perfmon_event_map[hw_event]; } +/* + * Previously calculated offsets + */ +static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; +static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; + +/* + * Legacy CPUs: + * 4 counters starting at 0xc0010000 each offset by 1 + * + * CPUs with core performance counter extensions: + * 6 counters starting at 0xc0010200 each offset by 2 + */ +static inline int amd_pmu_addr_offset(int index, bool eventsel) +{ + int offset; + + if (!index) + return index; + + if (eventsel) + offset = event_offsets[index]; + else + offset = count_offsets[index]; + + if (offset) + return offset; + + if (!cpu_has_perfctr_core) + offset = index; + else + offset = index << 1; + + if (eventsel) + event_offsets[index] = offset; + else + count_offsets[index] = offset; + + return offset; +} + static int amd_pmu_hw_config(struct perf_event *event) { int ret; @@ -578,6 +619,7 @@ static __initconst const struct x86_pmu amd_pmu = { .schedule_events = x86_schedule_events, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, + .addr_offset = amd_pmu_addr_offset, .event_map = amd_pmu_event_map, .max_events = ARRAY_SIZE(amd_perfmon_event_map), .num_counters = AMD64_NUM_COUNTERS, -- cgit v1.2.3-59-g8ed1b From 0fbdad078a70ed72248c3d30fe32e45e83be00d1 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Wed, 6 Feb 2013 11:26:28 -0600 Subject: perf/x86: Allow for architecture specific RDPMC indexes Similar to config_base and event_base, allow architecture specific RDPMC ECX values. 
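For context, the value produced by the new rdpmc_index() hook ends up in hwc->event_base_rdpmc, which is what user space loads into ECX for the RDPMC instruction. A minimal, x86-only user-space sketch of such a read, assuming the kernel permits user-space RDPMC (see /sys/devices/cpu/rdpmc) so the instruction does not fault, is:

#include <stdio.h>

/* Read performance counter 'idx' with RDPMC (x86 only). */
static inline unsigned long long read_pmc(unsigned int idx)
{
        unsigned int lo, hi;

        __asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
        return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
        /*
         * Index 0 is only a placeholder: real code derives the index for
         * its own event from the mmap'ed perf_event_mmap_page (the index
         * field, minus one, when it is non-zero).
         */
        printf("counter 0 = %llu\n", read_pmc(0));
        return 0;
}
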
Signed-off-by: Jacob Shin Acked-by: Stephane Eranian Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360171589-6381-6-git-send-email-jacob.shin@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 2 +- arch/x86/kernel/cpu/perf_event.h | 6 ++++++ arch/x86/kernel/cpu/perf_event_amd.c | 6 ++++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index c0df5ed2e048..bf0f01aea994 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -829,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event, } else { hwc->config_base = x86_pmu_config_addr(hwc->idx); hwc->event_base = x86_pmu_event_addr(hwc->idx); - hwc->event_base_rdpmc = hwc->idx; + hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); } } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index a7f06a90d2e7..7f5c75c2afdd 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -326,6 +326,7 @@ struct x86_pmu { unsigned eventsel; unsigned perfctr; int (*addr_offset)(int index, bool eventsel); + int (*rdpmc_index)(int index); u64 (*event_map)(int); int max_events; int num_counters; @@ -459,6 +460,11 @@ static inline unsigned int x86_pmu_event_addr(int index) x86_pmu.addr_offset(index, false) : index); } +static inline int x86_pmu_rdpmc_index(int index) +{ + return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; +} + int x86_setup_perfctr(struct perf_event *event); int x86_pmu_hw_config(struct perf_event *event); diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index b60f31caeda0..05462f0432d5 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -173,6 +173,11 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel) return offset; } +static inline int amd_pmu_rdpmc_index(int index) +{ + return index; +} + static int amd_pmu_hw_config(struct perf_event *event) { int ret; @@ -620,6 +625,7 @@ static __initconst const struct x86_pmu amd_pmu = { .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .addr_offset = amd_pmu_addr_offset, + .rdpmc_index = amd_pmu_rdpmc_index, .event_map = amd_pmu_event_map, .max_events = ARRAY_SIZE(amd_perfmon_event_map), .num_counters = AMD64_NUM_COUNTERS, -- cgit v1.2.3-59-g8ed1b From 341487ab561f3937a5283dd77c5660b1ee3b1f9e Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Sun, 3 Feb 2013 14:38:20 +0800 Subject: perf hists browser: Add option for runtime switching perf data file Based on perf report/top/scripts browser integration idea from acme. This enables the user to switch the data file at run time: when this option is selected, all the valid data files in the current working directory are listed in a popup, the filename selected by the user is saved in the global variable "input_name", and a new key 'K_SWITCH_INPUT_DATA' is passed back to the built-in command, which performs the switch. This initial version only enables it for 'perf report'.
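The directory scan in this patch leans on is_perf_magic() to recognize perf.data headers. A standalone approximation that accepts only the then-current "PERFILE2" on-disk magic (the real helper also matches older and byte-swapped layouts) could look like:

#include <stdio.h>
#include <string.h>

/* Crude stand-in for is_perf_magic(): compare the 8-byte header magic. */
static int looks_like_perf_data(const char *path)
{
        char magic[8];
        FILE *f = fopen(path, "r");
        int ok = 0;

        if (!f)
                return 0;
        if (fread(magic, 1, sizeof(magic), f) == sizeof(magic))
                ok = !memcmp(magic, "PERFILE2", sizeof(magic));
        fclose(f);
        return ok;
}

int main(int argc, char **argv)
{
        int i;

        for (i = 1; i < argc; i++)
                printf("%s: %s\n", argv[i],
                       looks_like_perf_data(argv[i]) ? "perf data" : "not perf data");
        return 0;
}
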
v2: rebase to latest 'perf/core' branch (6e1d4dd) of acme's perf tree Signed-off-by: Feng Tang Cc: Andi Kleen Cc: Ingo Molnar Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359873501-24541-1-git-send-email-feng.tang@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/hists.c | 112 ++++++++++++++++++++++++++++++++++++++++- tools/perf/ui/keysyms.h | 1 + 2 files changed, 112 insertions(+), 1 deletion(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 20ccd57753f7..aa22704047d6 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1241,6 +1241,96 @@ static inline bool is_report_browser(void *timer) return timer == NULL; } +/* + * Only runtime switching of perf data file will make "input_name" point + * to a malloced buffer. So add "is_input_name_malloced" flag to decide + * whether we need to call free() for current "input_name" during the switch. + */ +static bool is_input_name_malloced = false; ++static int switch_data_file(void) +{ + char *pwd, *options[32], *abs_path[32], *tmp; + DIR *pwd_dir; + int nr_options = 0, choice = -1, ret = -1; + struct dirent *dent; + + pwd = getenv("PWD"); + if (!pwd) + return ret; + + pwd_dir = opendir(pwd); + if (!pwd_dir) + return ret; + + memset(options, 0, sizeof(options)); + memset(abs_path, 0, sizeof(abs_path)); + + while ((dent = readdir(pwd_dir))) { + char path[PATH_MAX]; + u64 magic; + char *name = dent->d_name; + FILE *file; + + if (!(dent->d_type == DT_REG)) + continue; + + snprintf(path, sizeof(path), "%s/%s", pwd, name); + + file = fopen(path, "r"); + if (!file) + continue; + + if (fread(&magic, 1, 8, file) < 8) + goto close_file_and_continue; + + if (is_perf_magic(magic)) { + options[nr_options] = strdup(name); + if (!options[nr_options]) + goto close_file_and_continue; + + abs_path[nr_options] = strdup(path); + if (!abs_path[nr_options]) { + free(options[nr_options]); + ui__warning("Can't search all data files due to memory shortage.\n"); + fclose(file); + break; + } + + nr_options++; + } + +close_file_and_continue: + fclose(file); + if (nr_options >= 32) { + ui__warning("Too many perf data files in PWD!\n" + "Only the first 32 files will be listed.\n"); + break; + } + } + closedir(pwd_dir); + + if (nr_options) { + choice = ui__popup_menu(nr_options, options); + if (choice < nr_options && choice >= 0) { + tmp = strdup(abs_path[choice]); + if (tmp) { + if (is_input_name_malloced) + free((void *)input_name); + input_name = tmp; + is_input_name_malloced = true; + ret = 0; + } else + ui__warning("Data switch failed due to memory shortage!\n"); + } + } + + free_popup_options(options, nr_options); + free_popup_options(abs_path, nr_options); + return ret; +} + + static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, const char *helpline, const char *ev_name, bool left_exits, @@ -1275,7 +1365,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, int choice = 0, annotate = -2, zoom_dso = -2, zoom_thread = -2, annotate_f = -2, annotate_t = -2, browse_map = -2; - int scripts_comm = -2, scripts_symbol = -2, scripts_all = -2; + int scripts_comm = -2, scripts_symbol = -2, + scripts_all = -2, switch_data = -2; nr_options = 0; @@ -1332,6 +1423,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, if (is_report_browser(hbt)) goto do_scripts; continue; + case 's': + if (is_report_browser(hbt)) + goto do_data_switch; + continue; case K_F1: case 'h': case '?': @@ -1351,6
+1446,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, "d Zoom into current DSO\n" "t Zoom into current Thread\n" "r Run available scripts('perf report' only)\n" + "s Switch to another data file in PWD ('perf report' only)\n" "P Print histograms to perf.hist.N\n" "V Verbose (DSO names in callchains, etc)\n" "/ Filter symbol by name"); @@ -1458,6 +1554,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, if (asprintf(&options[nr_options], "Run scripts for all samples") > 0) scripts_all = nr_options++; + if (is_report_browser(hbt) && asprintf(&options[nr_options], + "Switch to another data file in PWD") > 0) + switch_data = nr_options++; add_exit_option: options[nr_options++] = (char *)"Exit"; retry_popup_menu: @@ -1568,6 +1667,16 @@ do_scripts: script_browse(script_opt); } + /* Switch to another data file */ + else if (choice == switch_data) { +do_data_switch: + if (!switch_data_file()) { + key = K_SWITCH_INPUT_DATA; + break; + } else + ui__warning("Won't switch the data files due to\n" + "no valid data file get selected!\n"); + } } out_free_stack: pstack__delete(fstack); @@ -1694,6 +1803,7 @@ browse_hists: "Do you really want to exit?")) continue; /* Fall thru */ + case K_SWITCH_INPUT_DATA: case 'q': case CTRL('c'): goto out; diff --git a/tools/perf/ui/keysyms.h b/tools/perf/ui/keysyms.h index 809eca5707fa..65092d576b4e 100644 --- a/tools/perf/ui/keysyms.h +++ b/tools/perf/ui/keysyms.h @@ -23,5 +23,6 @@ #define K_TIMER -1 #define K_ERROR -2 #define K_RESIZE -3 +#define K_SWITCH_INPUT_DATA -4 #endif /* _PERF_KEYSYMS_H_ */ -- cgit v1.2.3-59-g8ed1b From ad0de0971b7f7097bd9be1ab4ad2a64db500adbf Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Sun, 3 Feb 2013 14:38:21 +0800 Subject: perf report: Enable the runtime switching of perf data file This is for tui browser only. This patch will check the returned key of tui hists browser, if it's K_SWITCH_INPUT_DATA, then recreate a session for the newly selected data file. V2: Move the setup_browser() before the "repeat" jump point. Signed-off-by: Feng Tang Cc: Andi Kleen Cc: Ingo Molnar Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359873501-24541-2-git-send-email-feng.tang@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0d221870561a..91555d4885f4 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -468,9 +468,17 @@ static int __cmd_report(struct perf_report *rep) if (use_browser > 0) { if (use_browser == 1) { - perf_evlist__tui_browse_hists(session->evlist, help, - NULL, - &session->header.env); + ret = perf_evlist__tui_browse_hists(session->evlist, + help, + NULL, + &session->header.env); + /* + * Usually "ret" is the last pressed key, and we only + * care if the key notifies us to switch data file.
+ */ + if (ret != K_SWITCH_INPUT_DATA) + ret = 0; + } else if (use_browser == 2) { perf_evlist__gtk_browse_hists(session->evlist, help, NULL); @@ -708,6 +716,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) else input_name = "perf.data"; } + + if (strcmp(input_name, "-") != 0) + setup_browser(true); + else { + use_browser = 0; + perf_hpp__column_enable(PERF_HPP__OVERHEAD); + perf_hpp__init(); + } + +repeat: session = perf_session__new(input_name, O_RDONLY, report.force, false, &report.tool); if (session == NULL) @@ -733,14 +751,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) } - if (strcmp(input_name, "-") != 0) - setup_browser(true); - else { - use_browser = 0; - perf_hpp__column_enable(PERF_HPP__OVERHEAD); - perf_hpp__init(); - } - setup_sorting(report_usage, options); /* @@ -809,6 +819,12 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) } ret = __cmd_report(&report); + if (ret == K_SWITCH_INPUT_DATA) { + perf_session__delete(session); + goto repeat; + } else + ret = 0; + error: perf_session__delete(session); return ret; -- cgit v1.2.3-59-g8ed1b From 74b2133d19e776924b2773e27dd9d6940f1cc594 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 31 Jan 2013 13:54:37 +0100 Subject: perf evlist: Fix set event list leader The __perf_evlist__set_leader() was setting the leader for all events in the list except the first. Which means it assumed the first event already had event->leader = event. Seems like this should be the role of the function to also do this. This is a requirement for an upcoming patch set. Signed-off-by: Stephane Eranian Acked-by: Jiri Olsa Tested-by: Jiri Olsa Acked-by: Namhyung Kim Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20130131125437.GA3656@quad Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index eddd5ebcd690..ecf123e080bb 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -122,8 +122,7 @@ void __perf_evlist__set_leader(struct list_head *list) leader->nr_members = evsel->idx - leader->idx + 1; list_for_each_entry(evsel, list, node) { - if (evsel != leader) - evsel->leader = leader; + evsel->leader = leader; } } -- cgit v1.2.3-59-g8ed1b From 2209001fd895e8932ae2c85bfca233758234499a Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 4 Feb 2013 13:05:54 +0100 Subject: perf tools: Check for flex and bison before continuing building Check whether both executables are present on the system before continuing with the build instead of failing halfway, if either are missing. Signed-off-by: Borislav Petkov Link: http://lkml.kernel.org/r/1359979554-9160-1-git-send-email-bp@alien8.de Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 4b1044cbd84c..a158309a65ef 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -149,6 +149,8 @@ RM = rm -f MKDIR = mkdir FIND = find INSTALL = install +FLEX = flex +BISON= bison # sparse is architecture-neutral, which means that we need to tell it # explicitly what architecture to check for. Fix this up for yours.. 
@@ -158,6 +160,14 @@ ifneq ($(MAKECMDGOALS),clean) ifneq ($(MAKECMDGOALS),tags) -include config/feature-tests.mak +ifeq ($(call get-executable,$(FLEX)),) + dummy := $(error Error: $(FLEX) is missing on this system, please install it) +endif + +ifeq ($(call get-executable,$(BISON)),) + dummy := $(error Error: $(BISON) is missing on this system, please install it) +endif + ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) CFLAGS := $(CFLAGS) -fstack-protector-all endif @@ -282,9 +292,6 @@ endif export PERL_PATH -FLEX = flex -BISON= bison - $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c -- cgit v1.2.3-59-g8ed1b From 51f27d1440cede5a413d279a20b38767b6f85097 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 6 Feb 2013 14:57:15 +0900 Subject: perf sort: Drop ip_[lr] arguments from _sort__sym_cmp() Current _sort__sym_cmp() function is used for comparing symbols between two hist entries on symbol, symbol_from and symbol_to sort keys. Those functions pass addresses of symbols but it's meaningless since it gets over-written inside of the _sort__sym_cmp function to a start address of the symbol. So just get rid of them. This might cause a difference than prior output for branch stacks since it seems not using start address of the symbol but branch address. However AFAICS it'd be same as it gets overwritten anyway. Also remove redundant part of code in sort__sym_cmp(). Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1360130237-9963-1-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 83336610faa9..03cabe5678d0 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -160,9 +160,10 @@ struct sort_entry sort_dso = { /* --sort symbol */ -static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, - u64 ip_l, u64 ip_r) +static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) { + u64 ip_l, ip_r; + if (!sym_l || !sym_r) return cmp_null(sym_l, sym_r); @@ -178,21 +179,10 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { - u64 ip_l, ip_r; - if (!left->ms.sym && !right->ms.sym) return right->level - left->level; - if (!left->ms.sym || !right->ms.sym) - return cmp_null(left->ms.sym, right->ms.sym); - - if (left->ms.sym == right->ms.sym) - return 0; - - ip_l = left->ms.sym->start; - ip_r = right->ms.sym->start; - - return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); + return _sort__sym_cmp(left->ms.sym, right->ms.sym); } static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, @@ -383,8 +373,7 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) if (!from_l->sym && !from_r->sym) return right->level - left->level; - return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr, - from_r->addr); + return _sort__sym_cmp(from_l->sym, from_r->sym); } static int64_t @@ -396,7 +385,7 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) if (!to_l->sym && !to_r->sym) 
return right->level - left->level; - return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr); + return _sort__sym_cmp(to_l->sym, to_r->sym); } static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, -- cgit v1.2.3-59-g8ed1b From 553099857702bb77e541c47bde47f6863834d2e2 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 6 Feb 2013 14:57:16 +0900 Subject: perf sort: Make setup_sorting returns an error code Currently the setup_sorting() is called for parsing sort keys and exits if it failed to add the sort key. As it's included in libperf it'd be better returning an error code rather than exiting application inside of the library. Signed-off-by: Namhyung Kim Suggested-by: Arnaldo Carvalho de Melo Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1360130237-9963-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 3 ++- tools/perf/builtin-diff.c | 4 +++- tools/perf/builtin-report.c | 3 ++- tools/perf/builtin-top.c | 3 ++- tools/perf/tests/hists_link.c | 3 ++- tools/perf/util/sort.c | 10 ++++++---- tools/perf/util/sort.h | 2 +- 7 files changed, 18 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index dc870cf31b79..95a2ad3f043e 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -309,7 +309,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) if (symbol__init() < 0) return -1; - setup_sorting(annotate_usage, options); + if (setup_sorting() < 0) + usage_with_options(annotate_usage, options); if (argc) { /* diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 4af0b580b046..d207a97a2db1 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -605,7 +605,9 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) ui_init(); - setup_sorting(diff_usage, options); + if (setup_sorting() < 0) + usage_with_options(diff_usage, options); + setup_pager(); sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 91555d4885f4..96b5a7fee4bb 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -751,7 +751,8 @@ repeat: } - setup_sorting(report_usage, options); + if (setup_sorting() < 0) + usage_with_options(report_usage, options); /* * Only in the newt browser we are doing integrated annotation, diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index f561757b1bfa..72f6eb7b4173 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1129,7 +1129,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) if (sort_order == default_sort_order) sort_order = "dso,symbol"; - setup_sorting(top_usage, options); + if (setup_sorting() < 0) + usage_with_options(top_usage, options); if (top.use_stdio) use_browser = 0; diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c index 0afd9223bde7..1be64a6c5daf 100644 --- a/tools/perf/tests/hists_link.c +++ b/tools/perf/tests/hists_link.c @@ -449,7 +449,8 @@ int test__hists_link(void) goto out; /* default sort order (comm,dso,sym) will be used */ - setup_sorting(NULL, NULL); + if (setup_sorting() < 0) + goto out; machines__init(&machines); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 03cabe5678d0..d8b48827a17e 100644 --- 
a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -565,23 +565,25 @@ int sort_dimension__add(const char *tok) return -ESRCH; } -void setup_sorting(const char * const usagestr[], const struct option *opts) +int setup_sorting(void) { char *tmp, *tok, *str = strdup(sort_order); + int ret = 0; for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { - int ret = sort_dimension__add(tok); + ret = sort_dimension__add(tok); if (ret == -EINVAL) { error("Invalid --sort key: `%s'", tok); - usage_with_options(usagestr, opts); + break; } else if (ret == -ESRCH) { error("Unknown --sort key: `%s'", tok); - usage_with_options(usagestr, opts); + break; } } free(str); + return ret; } void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index e994ad3e9897..b13e56f6ccbe 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -160,7 +160,7 @@ struct sort_entry { extern struct sort_entry sort_thread; extern struct list_head hist_entry__sort_list; -void setup_sorting(const char * const usagestr[], const struct option *opts); +int setup_sorting(void); extern int sort_dimension__add(const char *); void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, const char *list_name, FILE *fp); -- cgit v1.2.3-59-g8ed1b From 5936f54d6ca2857d81188dcdff8c61b8fc482f53 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 6 Feb 2013 14:57:17 +0900 Subject: perf sort: Check return value of strdup() When setup_sorting() is called, 'str' is passed to strtok_r() but it's not checked to have a valid pointer. As strtok_r() accepts NULL pointer on a first argument and use the third argument in that case, it can cause a trouble since our third argument, tmp, is not initialized. Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1360130237-9963-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index d8b48827a17e..d41926cb9e3f 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -570,6 +570,11 @@ int setup_sorting(void) char *tmp, *tok, *str = strdup(sort_order); int ret = 0; + if (str == NULL) { + error("Not enough memory to setup sort keys"); + return -ENOMEM; + } + for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { ret = sort_dimension__add(tok); -- cgit v1.2.3-59-g8ed1b From 0479b8b9cf4377df5d2c81506ce93326c31eff40 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 5 Feb 2013 14:12:42 -0700 Subject: perf evlist: Make event_copy local to mmaps I am getting segfaults *after* the time sorting of perf samples where the event type is off the charts: (gdb) bt \#0 0x0807b1b2 in hists__inc_nr_events (hists=0x80a99c4, type=1163281902) at util/hist.c:1225 \#1 0x08070795 in perf_session_deliver_event (session=0x80a9b90, event=0xf7a6aff8, sample=0xffffc318, tool=0xffffc520, file_offset=0) at util/session.c:884 \#2 0x0806f9b9 in flush_sample_queue (s=0x80a9b90, tool=0xffffc520) at util/session.c:555 \#3 0x0806fc53 in process_finished_round (tool=0xffffc520, event=0x0, session=0x80a9b90) at util/session.c:645 This is bizarre because the event has already been processed once -- before it was added to the samples queue -- and the event was found to be sane at that time. There seem to be 2 causes: 1. 
perf_evlist__mmap_read updates the read location even though there are outstanding references to events sitting in the mmap buffers via the ordered samples queue. 2. There is a single evlist->event_copy for all evlist entries. event_copy is used to handle an event wrapping at the mmap buffer boundary. This patch addresses the second problem - making event_copy local to each perf_mmap. With this change my highly repeatable use case no longer fails. The first problem is much more complicated and will be the subject of a future patch. Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360098762-61827-1-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/perf.h | 26 -------------------------- tools/perf/util/evlist.c | 4 ++-- tools/perf/util/evlist.h | 29 ++++++++++++++++++++++++++++- 3 files changed, 30 insertions(+), 29 deletions(-) diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 8f3bf388e414..c2206c87fc9f 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -103,32 +103,6 @@ #include "util/types.h" #include -struct perf_mmap { - void *base; - int mask; - unsigned int prev; -}; - -static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm) -{ - struct perf_event_mmap_page *pc = mm->base; - int head = pc->data_head; - rmb(); - return head; -} - -static inline void perf_mmap__write_tail(struct perf_mmap *md, - unsigned long tail) -{ - struct perf_event_mmap_page *pc = md->base; - - /* - * ensure all reads are done before we write the tail out. - */ - /* mb(); */ - pc->data_tail = tail; -} - /* * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all * counters in the current task. diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index ecf123e080bb..bc4ad7977438 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -375,7 +375,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) if ((old & md->mask) + size != ((old + size) & md->mask)) { unsigned int offset = old; unsigned int len = min(sizeof(*event), size), cpy; - void *dst = &evlist->event_copy; + void *dst = &md->event_copy; do { cpy = min(md->mask + 1 - (offset & md->mask), len); @@ -385,7 +385,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) len -= cpy; } while (len); - event = &evlist->event_copy; + event = &md->event_copy; } old += size; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 73579a25a93e..2dd07bd60b4f 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -17,6 +17,13 @@ struct perf_record_opts; #define PERF_EVLIST__HLIST_BITS 8 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) +struct perf_mmap { + void *base; + int mask; + unsigned int prev; + union perf_event event_copy; +}; + struct perf_evlist { struct list_head entries; struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; @@ -30,7 +37,6 @@ struct perf_evlist { pid_t pid; } workload; bool overwrite; - union perf_event event_copy; struct perf_mmap *mmap; struct pollfd *pollfd; struct thread_map *threads; @@ -136,4 +142,25 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) } size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); + +static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm) +{ + struct perf_event_mmap_page *pc = mm->base; + int head = pc->data_head; + rmb(); + return head; +} + +static inline void 
perf_mmap__write_tail(struct perf_mmap *md, + unsigned long tail) +{ + struct perf_event_mmap_page *pc = md->base; + + /* + * ensure all reads are done before we write the tail out. + */ + /* mb(); */ + pc->data_tail = tail; +} + #endif /* __PERF_EVLIST_H */ -- cgit v1.2.3-59-g8ed1b From 5ac59a8a77e3faa1eaf9bfe82a61e9396b082c3d Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 6 Feb 2013 15:46:01 +0100 Subject: perf tools: Add cpu_map processor socket level functions This patch adds: - cpu_map__get_socket: get socket id from cpu - cpu_map__build_socket_map: build socket map - cpu_map__socket: gets actual socket from logical socket Those functions are used by uncore and processor socket-level aggregation modes. Signed-off-by: Stephane Eranian Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360161962-9675-2-git-send-email-eranian@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/cpumap.h | 9 ++++++++ 2 files changed, 63 insertions(+) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 2b32ffa9ebdb..f817046e22b1 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -1,4 +1,5 @@ #include "util.h" +#include "sysfs.h" #include "../perf.h" #include "cpumap.h" #include @@ -201,3 +202,56 @@ void cpu_map__delete(struct cpu_map *map) { free(map); } + +int cpu_map__get_socket(struct cpu_map *map, int idx) +{ + FILE *fp; + const char *mnt; + char path[PATH_MAX]; + int cpu, ret; + + if (idx > map->nr) + return -1; + + cpu = map->map[idx]; + + mnt = sysfs_find_mountpoint(); + if (!mnt) + return -1; + + sprintf(path, + "%s/devices/system/cpu/cpu%d/topology/physical_package_id", + mnt, cpu); + + fp = fopen(path, "r"); + if (!fp) + return -1; + ret = fscanf(fp, "%d", &cpu); + fclose(fp); + return ret == 1 ? cpu : -1; +} + +int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp) +{ + struct cpu_map *sock; + int nr = cpus->nr; + int cpu, s1, s2; + + sock = calloc(1, sizeof(*sock) + nr * sizeof(int)); + if (!sock) + return -1; + + for (cpu = 0; cpu < nr; cpu++) { + s1 = cpu_map__get_socket(cpus, cpu); + for (s2 = 0; s2 < sock->nr; s2++) { + if (s1 == sock->map[s2]) + break; + } + if (s2 == sock->nr) { + sock->map[sock->nr] = s1; + sock->nr++; + } + } + *sockp = sock; + return 0; +} diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 2f68a3b8c285..161b00756a12 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -14,6 +14,15 @@ struct cpu_map *cpu_map__dummy_new(void); void cpu_map__delete(struct cpu_map *map); struct cpu_map *cpu_map__read(FILE *file); size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); +int cpu_map__get_socket(struct cpu_map *map, int idx); +int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp); + +static inline int cpu_map__socket(struct cpu_map *sock, int s) +{ + if (!sock || s > sock->nr || s < 0) + return 0; + return sock->map[s]; +} static inline int cpu_map__nr(const struct cpu_map *map) { -- cgit v1.2.3-59-g8ed1b From d7e7a451c13e784f497c054f1bd083d77be87498 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 6 Feb 2013 15:46:02 +0100 Subject: perf stat: Add per processor socket count aggregation This patch adds per-processor socket count aggregation for system-wide mode measurements. This is a useful mode to detect imbalance between sockets.
To enable this mode, use --aggr-socket in addition to -a. (system-wide). The output includes the socket number and the number of online processors on that socket. This is useful to gauge the amount of aggregation. # ./perf stat -I 1000 -a --aggr-socket -e cycles sleep 2 # time socket cpus counts events 1.000097680 S0 4 5,788,785 cycles 2.000379943 S0 4 27,361,546 cycles 2.001167808 S0 4 818,275 cycles Signed-off-by: Stephane Eranian Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360161962-9675-3-git-send-email-eranian@google.com [ committer note: Added missing man page entry based on above comments ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-stat.txt | 9 ++- tools/perf/builtin-stat.c | 126 ++++++++++++++++++++++++++++++--- 2 files changed, 123 insertions(+), 12 deletions(-) diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 5289da3344e9..faf4f4feebcc 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -116,9 +116,16 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m -I msecs:: --interval-print msecs:: - print count deltas every N milliseconds (minimum: 100ms) + Print count deltas every N milliseconds (minimum: 100ms) example: perf stat -I 1000 -e cycles -a sleep 5 +--aggr-socket:: +Aggregate counts per processor socket for system-wide mode measurements. This +is a useful mode to detect imbalance between sockets. To enable this mode, +use --aggr-socket in addition to -a. (system-wide). The output includes the +socket number and the number of online processors on that socket. This is +useful to gauge the amount of aggregation. + EXAMPLES -------- diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0368a1036ad6..99848761f573 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -68,6 +68,7 @@ static void print_stat(int argc, const char **argv); static void print_counter_aggr(struct perf_evsel *counter, char *prefix); static void print_counter(struct perf_evsel *counter, char *prefix); +static void print_aggr_socket(char *prefix); static struct perf_evlist *evsel_list; @@ -79,6 +80,7 @@ static int run_count = 1; static bool no_inherit = false; static bool scale = true; static bool no_aggr = false; +static bool aggr_socket = false; static pid_t child_pid = -1; static bool null_run = false; static int detailed_run = 0; @@ -93,6 +95,7 @@ static const char *post_cmd = NULL; static bool sync_run = false; static unsigned int interval = 0; static struct timespec ref_time; +static struct cpu_map *sock_map; static volatile int done = 0; @@ -312,7 +315,9 @@ static void print_interval(void) sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep); if (num_print_interval == 0 && !csv_output) { - if (no_aggr) + if (aggr_socket) + fprintf(output, "# time socket cpus counts events\n"); + else if (no_aggr) fprintf(output, "# time CPU counts events\n"); else fprintf(output, "# time counts events\n"); @@ -321,7 +326,9 @@ static void print_interval(void) if (++num_print_interval == 25) num_print_interval = 0; - if (no_aggr) { + if (aggr_socket) + print_aggr_socket(prefix); + else if (no_aggr) { list_for_each_entry(counter, &evsel_list->entries, node) print_counter(counter, prefix); } else { @@ -349,6 +356,12 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv) ts.tv_nsec = 0; } + if (aggr_socket + && 
cpu_map__build_socket_map(evsel_list->cpus, &sock_map)) { + perror("cannot build socket map"); + return -1; + } + if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { perror("failed to create pipes"); return -1; @@ -529,13 +542,21 @@ static void print_noise(struct perf_evsel *evsel, double avg) print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); } -static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) +static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) { double msecs = avg / 1e6; char cpustr[16] = { '\0', }; const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s"; - if (no_aggr) + if (aggr_socket) + sprintf(cpustr, "S%*d%s%*d%s", + csv_output ? 0 : -5, + cpu, + csv_sep, + csv_output ? 0 : 4, + nr, + csv_sep); + else if (no_aggr) sprintf(cpustr, "CPU%*d%s", csv_output ? 0 : -4, perf_evsel__cpus(evsel)->map[cpu], csv_sep); @@ -734,7 +755,7 @@ static void print_ll_cache_misses(int cpu, fprintf(output, " of all LL-cache hits "); } -static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) +static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) { double total, ratio = 0.0; char cpustr[16] = { '\0', }; @@ -747,7 +768,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) else fmt = "%s%18.0f%s%-25s"; - if (no_aggr) + if (aggr_socket) + sprintf(cpustr, "S%*d%s%*d%s", + csv_output ? 0 : -5, + cpu, + csv_sep, + csv_output ? 0 : 4, + nr, + csv_sep); + else if (no_aggr) sprintf(cpustr, "CPU%*d%s", csv_output ? 0 : -4, perf_evsel__cpus(evsel)->map[cpu], csv_sep); @@ -853,6 +882,70 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) } } +static void print_aggr_socket(char *prefix) +{ + struct perf_evsel *counter; + u64 ena, run, val; + int cpu, s, s2, sock, nr; + + if (!sock_map) + return; + + for (s = 0; s < sock_map->nr; s++) { + sock = cpu_map__socket(sock_map, s); + list_for_each_entry(counter, &evsel_list->entries, node) { + val = ena = run = 0; + nr = 0; + for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { + s2 = cpu_map__get_socket(evsel_list->cpus, cpu); + if (s2 != sock) + continue; + val += counter->counts->cpu[cpu].val; + ena += counter->counts->cpu[cpu].ena; + run += counter->counts->cpu[cpu].run; + nr++; + } + if (prefix) + fprintf(output, "%s", prefix); + + if (run == 0 || ena == 0) { + fprintf(output, "S%*d%s%*d%s%*s%s%*s", + csv_output ? 0 : -5, + s, + csv_sep, + csv_output ? 0 : 4, + nr, + csv_sep, + csv_output ? 0 : 18, + counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, + csv_sep, + csv_output ? 
0 : -24, + perf_evsel__name(counter)); + if (counter->cgrp) + fprintf(output, "%s%s", + csv_sep, counter->cgrp->name); + + fputc('\n', output); + continue; + } + + if (nsec_counter(counter)) + nsec_printout(sock, nr, counter, val); + else + abs_printout(sock, nr, counter, val); + + if (!csv_output) { + print_noise(counter, 1.0); + + if (run != ena) + fprintf(output, " (%.2f%%)", + 100.0 * run / ena); + } + fputc('\n', output); + } + } +} + /* * Print out the results of a single counter: * aggregated counts in system-wide mode @@ -882,9 +975,9 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix) } if (nsec_counter(counter)) - nsec_printout(-1, counter, avg); + nsec_printout(-1, 0, counter, avg); else - abs_printout(-1, counter, avg); + abs_printout(-1, 0, counter, avg); print_noise(counter, avg); @@ -940,9 +1033,9 @@ static void print_counter(struct perf_evsel *counter, char *prefix) } if (nsec_counter(counter)) - nsec_printout(cpu, counter, val); + nsec_printout(cpu, 0, counter, val); else - abs_printout(cpu, counter, val); + abs_printout(cpu, 0, counter, val); if (!csv_output) { print_noise(counter, 1.0); @@ -980,7 +1073,9 @@ static void print_stat(int argc, const char **argv) fprintf(output, ":\n\n"); } - if (no_aggr) { + if (aggr_socket) + print_aggr_socket(NULL); + else if (no_aggr) { list_for_each_entry(counter, &evsel_list->entries, node) print_counter(counter, NULL); } else { @@ -1228,6 +1323,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) "command to run after to the measured command"), OPT_UINTEGER('I', "interval-print", &interval, "print counts at regular interval in ms (>= 100)"), + OPT_BOOLEAN(0, "aggr-socket", &aggr_socket, "aggregate counts per processor socket"), OPT_END() }; const char * const stat_usage[] = { @@ -1314,6 +1410,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) usage_with_options(stat_usage, options); } + if (aggr_socket) { + if (!perf_target__has_cpu(&target)) { + fprintf(stderr, "--aggr-socket only available in system-wide mode (-a)\n"); + usage_with_options(stat_usage, options); + } + no_aggr = true; + } + if (add_default_attributes()) goto out; -- cgit v1.2.3-59-g8ed1b From 0c5268bf2218144469dde3228f14898fadbbcdcd Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 4 Feb 2013 13:32:55 +0100 Subject: perf hists browser: Add support to display whole group data for raw columns Currently we don't display group members' values for raw columns like 'Samples' and 'Period' when in group report mode. Unite the '__hpp__percent_fmt' and '__hpp__raw_fmt' functions under a new function, __hpp__fmt. It's basically the '__hpp__percent_fmt' code with a new 'fmt_percent' bool parameter added, saying whether a raw number or a percentage should be printed. This way raw columns print out all the group members when in group report mode, like: $ perf record -e '{cycles,cache-misses}' ls ... $ perf report --group --show-total-period --stdio ... # Overhead Period Command Shared Object Symbol # ................ ........................ ....... ................. ................................. # 23.63% 11.24% 3331335 317 ls [kernel.kallsyms] [k] __lock_acquire 12.72% 0.00% 1793100 0 ls [kernel.kallsyms] [k] native_sched_clock 9.72% 0.00% 1369920 0 ls libc-2.14.90.so [.] _nl_find_locale 0.03% 0.07% 4476 2 ls [kernel.kallsyms] [k] intel_pmu_enable_all 0.00% 11.73% 0 331 ls ld-2.14.90.so [.]
_dl_cache_libcmp 0.00% 11.06% 0 312 ls [kernel.kallsyms] [k] vma_interval_tree_insert Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359981185-16819-2-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/hist.c | 53 ++++++++++++++++++++++++++-------------------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index a47ce98c2cb1..d671e63aa351 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -9,18 +9,24 @@ typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...); -static int __hpp__percent_fmt(struct perf_hpp *hpp, struct hist_entry *he, - u64 (*get_field)(struct hist_entry *), - const char *fmt, hpp_snprint_fn print_fn) +static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, + u64 (*get_field)(struct hist_entry *), + const char *fmt, hpp_snprint_fn print_fn, + bool fmt_percent) { int ret; - double percent = 0.0; struct hists *hists = he->hists; - if (hists->stats.total_period) - percent = 100.0 * get_field(he) / hists->stats.total_period; + if (fmt_percent) { + double percent = 0.0; + + if (hists->stats.total_period) + percent = 100.0 * get_field(he) / + hists->stats.total_period; - ret = print_fn(hpp->buf, hpp->size, fmt, percent); + ret = print_fn(hpp->buf, hpp->size, fmt, percent); + } else + ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he)); if (symbol_conf.event_group) { int prev_idx, idx_delta; @@ -49,11 +55,15 @@ static int __hpp__percent_fmt(struct perf_hpp *hpp, struct hist_entry *he, * have no sample */ ret += print_fn(hpp->buf + ret, hpp->size - ret, - fmt, 0.0); + fmt, 0); } - ret += print_fn(hpp->buf + ret, hpp->size - ret, - fmt, 100.0 * period / total); + if (fmt_percent) + ret += print_fn(hpp->buf + ret, hpp->size - ret, + fmt, 100.0 * period / total); + else + ret += print_fn(hpp->buf + ret, hpp->size - ret, + fmt, period); prev_idx = perf_evsel__group_idx(evsel); } @@ -65,23 +75,12 @@ static int __hpp__percent_fmt(struct perf_hpp *hpp, struct hist_entry *he, * zero-fill group members at last which have no sample */ ret += print_fn(hpp->buf + ret, hpp->size - ret, - fmt, 0.0); + fmt, 0); } } return ret; } -static int __hpp__raw_fmt(struct perf_hpp *hpp, struct hist_entry *he, - u64 (*get_field)(struct hist_entry *), - const char *fmt, hpp_snprint_fn print_fn) -{ - int ret; - - ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he)); - return ret; -} - - #define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ static int hpp__header_##_type(struct perf_hpp *hpp) \ { \ @@ -116,16 +115,16 @@ static u64 he_get_##_field(struct hist_entry *he) \ \ static int hpp__color_##_type(struct perf_hpp *hpp, struct hist_entry *he) \ { \ - return __hpp__percent_fmt(hpp, he, he_get_##_field, " %6.2f%%", \ - (hpp_snprint_fn)percent_color_snprintf); \ + return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ + (hpp_snprint_fn)percent_color_snprintf, true); \ } #define __HPP_ENTRY_PERCENT_FN(_type, _field) \ static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \ { \ const char *fmt = symbol_conf.field_sep ? 
" %.2f" : " %6.2f%%"; \ - return __hpp__percent_fmt(hpp, he, he_get_##_field, fmt, \ - scnprintf); \ + return __hpp__fmt(hpp, he, he_get_##_field, fmt, \ + scnprintf, true); \ } #define __HPP_ENTRY_RAW_FN(_type, _field) \ @@ -137,7 +136,7 @@ static u64 he_get_raw_##_field(struct hist_entry *he) \ static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \ { \ const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \ - return __hpp__raw_fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf); \ + return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \ } #define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \ -- cgit v1.2.3-59-g8ed1b From b22e79395c0fe4c86dd35745a929366034386ccc Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 5 Feb 2013 17:05:50 +0100 Subject: perf perl scripts: Fix SIGALRM and pipe read race for rwtop Fixing rwtop script race. The issue is caused by rwtop script triggering SIGALRM and underneath pipe reading layer reporting error when interrupted. Fixing this by setting SA_RESTART for rwtop SIGALRM handler, which avoids interruption of the pipe reading layer. The discussion for this issue & fix is here: https://lkml.org/lkml/2012/9/18/123 Signed-off-by: Jiri Olsa Original-patch-by: Andrew Jones Cc: Andrew Jones Cc: Corey Ashford Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360080351-3246-2-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/perl/rwtop.pl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl index 4bb3ecd33472..8b20787021c1 100644 --- a/tools/perf/scripts/perl/rwtop.pl +++ b/tools/perf/scripts/perl/rwtop.pl @@ -17,6 +17,7 @@ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; use lib "./Perf-Trace-Util/lib"; use Perf::Trace::Core; use Perf::Trace::Util; +use POSIX qw/SIGALRM SA_RESTART/; my $default_interval = 3; my $nlines = 20; @@ -90,7 +91,10 @@ sub syscalls::sys_enter_write sub trace_begin { - $SIG{ALRM} = \&set_print_pending; + my $sa = POSIX::SigAction->new(\&set_print_pending); + $sa->flags(SA_RESTART); + $sa->safe(1); + POSIX::sigaction(SIGALRM, $sa) or die "Can't set SIGALRM handler: $!\n"; alarm 1; } -- cgit v1.2.3-59-g8ed1b From 89bb67ff935d461544fed87174bb13dcc4bac673 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 4 Feb 2013 10:56:42 +0100 Subject: perf tools: Fix perf_evsel::exclude_GH handling Let the perf_evsel::exclude_GH only prevent the reset of exclude_host and exclude_guest attributes in case they were already set. We cannot reset their values to 0, because they might have other defaults set by event_attr_init. Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359971803-2343-2-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/parse-events.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 4e0f5c2a9fda..c84f48cf9678 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -699,14 +699,6 @@ static int get_event_modifier(struct event_modifier *mod, char *str, int exclude = eu | ek | eh; int exclude_GH = evsel ? 
evsel->exclude_GH : 0; - /* - * We are here for group and 'GH' was not set as event - * modifier and whatever event/group modifier override - * default 'GH' setup. - */ - if (evsel && !exclude_GH) - eH = eG = 0; - memset(mod, 0, sizeof(*mod)); while (*str) { -- cgit v1.2.3-59-g8ed1b From 5a30a99fb4bb4c9374ea122a2a7c9cd9d26ecdd6 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 4 Feb 2013 10:56:43 +0100 Subject: perf tests: Adding automated parsing tests for group :GH modifiers The ':GH' group modifier handling was just recently fixed, adding some automated tests to keep it that way. Adding tests for the following events: "{cycles,cache-misses:G}:H" "{cycles,cache-misses:H}:G" "{cycles:G,cache-misses:H}:u" "{cycles:G,cache-misses:H}:uG" Plus fixing the test__group2 test. Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359971803-2343-3-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 178 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 177 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 80a8daf54a63..c5636f36fe31 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -577,7 +577,7 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); @@ -811,6 +811,166 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) return 0; } +static int test__group_gh1(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles + :H group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:G + :H group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong
exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + + return 0; +} + +static int test__group_gh2(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles + :G group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:H + :G group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + + return 0; +} + +static int test__group_gh3(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles:G + :u group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", 
perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:H + :u group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + + return 0; +} + +static int test__group_gh4(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles:G + :uG group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:H + :uG group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + + return 0; +} + static int count_tracepoints(void) { char events_path[PATH_MAX]; @@ -1011,6 +1171,22 @@ static struct evlist_test test__events[] = { .name = "*:*", .check = test__all_tracepoints, }, + [34] = { + .name = "{cycles,cache-misses:G}:H", + .check = test__group_gh1, + }, + [35] = { + .name = "{cycles,cache-misses:H}:G", + .check = test__group_gh2, + }, + [36] = { + .name = "{cycles:G,cache-misses:H}:u", + .check = test__group_gh3, + }, + [37] = { + .name = "{cycles:G,cache-misses:H}:uG", + .check = test__group_gh4, + }, }; static struct evlist_test test__events_pmu[] = { -- cgit v1.2.3-59-g8ed1b From 91b988048bea24eae386da3141d247ccea795a81 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 30 Jan 2013 20:05:49 -0500 Subject: perf tools: Fix calloc argument 
ordering A sweep of the kernel for regex "kcalloc(sizeof" turned up 2 reversed args, fixed in commit d3d09e18203dba16a9dbdb2b4cc673d90748cdd1 ("EDAC: Fix kcalloc argument order") and also fixed in the networking commit a1b1add07fa794974573d93483d68e373edfe7bd ("gro: Fix kcalloc argument order"). I know that was the regex used, because on seeing the 1st of these changes, I wondered "how many other instances of this are there" and I happened to just use "calloc(sizeof" as a regex and it in turn found these additional reversed args instances in the perf code. In the kcalloc cases, the changes are cosmetic, since the numbers are simply multiplied. I had no desire to go data mining in userspace to see if the same thing held true there, however. Signed-off-by: Paul Gortmaker Cc: Ingo Molnar Cc: Joe Perches Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1359594349-25912-1-git-send-email-paul.gortmaker@windriver.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/callchain.c | 2 +- tools/perf/util/header.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index d3b3f5d82137..42b6a632fe7b 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -444,7 +444,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor, struct callchain_cursor_node *node = *cursor->last; if (!node) { - node = calloc(sizeof(*node), 1); + node = calloc(1, sizeof(*node)); if (!node) return -ENOMEM; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 8022b5254f75..f4bfd79ef6a7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2253,7 +2253,7 @@ static int perf_header__adds_write(struct perf_header *header, if (!nr_sections) return 0; - feat_sec = p = calloc(sizeof(*feat_sec), nr_sections); + feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); if (feat_sec == NULL) return -ENOMEM; @@ -2425,7 +2425,7 @@ int perf_header__process_sections(struct perf_header *header, int fd, if (!nr_sections) return 0; - feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); + feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); if (!feat_sec) return -1; -- cgit v1.2.3-59-g8ed1b From e35ef355ad3dd26bff79c8711f070ac69501dfa3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 6 Feb 2013 17:20:02 -0300 Subject: perf evlist: Pass the event_group info via perf_attr_details So that we avoid dragging symbol.o into the python binding. 
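The decoupling pattern here is easy to miss in the flattened diff: replacing a global flag with a field in a caller-owned options struct removes the link-time dependency on the object that defines the global. A minimal user-space sketch of the idea follows; the names are illustrative, not the actual perf API:

    #include <stdbool.h>
    #include <stdio.h>

    /* caller-owned options, instead of a flag in a global config object */
    struct print_details {
        bool event_group;
    };

    static void print_event(const char *name, const struct print_details *d)
    {
        /* only this translation unit is needed; no external config symbol */
        printf("%s%s\n", d->event_group ? "{group} " : "", name);
    }

    int main(void)
    {
        struct print_details details = { .event_group = true };
        print_event("cycles", &details);
        return 0;
    }
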
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-izjubje7ltd1srji5wb0ygwi@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-evlist.c | 4 ++-- tools/perf/util/evsel.c | 2 +- tools/perf/util/evsel.h | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index 85a5e35dd147..05bd9dfe875c 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c @@ -39,7 +39,7 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"), OPT_BOOLEAN('v', "verbose", &details.verbose, "Show all event attr details"), - OPT_BOOLEAN('g', "group", &symbol_conf.event_group, + OPT_BOOLEAN('g', "group", &details.event_group, "Show event group information"), OPT_END() }; @@ -52,7 +52,7 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) if (argc) usage_with_options(evlist_usage, options); - if (symbol_conf.event_group && (details.verbose || details.freq)) { + if (details.event_group && (details.verbose || details.freq)) { pr_err("--group option is not compatible with other options\n"); usage_with_options(evlist_usage, options); } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a54701504606..9c82f98f26de 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1391,7 +1391,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, bool first = true; int printed = 0; - if (symbol_conf.event_group) { + if (details->event_group) { struct perf_evsel *pos; if (!perf_evsel__is_group_leader(evsel)) diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 8512f6a8a6ea..52021c3087df 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -254,6 +254,7 @@ static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel) struct perf_attr_details { bool freq; bool verbose; + bool event_group; }; int perf_evsel__fprintf(struct perf_evsel *evsel, -- cgit v1.2.3-59-g8ed1b From 88fd2b6a76264e9e14463f532caae09d82a53207 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 6 Feb 2013 17:21:47 -0300 Subject: perf python: Link with sysfs.o So that we fix this regression: [root@sandy linux]# perf test -v 15 15: Try 'use perf' in python, checking link problems : --- start --- Traceback (most recent call last): File "", line 1, in ImportError: /home/acme/git/build/perf/python/perf.so: undefined symbol: sysfs_find_mountpoint ---- end ---- Try 'use perf' in python, checking link problems: FAILED! 
[root@sandy linux]# Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-8pf64bsdywg1gl9m55ul77hg@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/python-ext-sources | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index c40c2d33199e..64536a993f4a 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -18,4 +18,5 @@ util/cgroup.c util/debugfs.c util/rblist.c util/strlist.c +util/sysfs.c ../../lib/rbtree.c -- cgit v1.2.3-59-g8ed1b From bbc33d05930f870ea049eae5ed980f8b827d0813 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 21 Nov 2012 16:55:38 +0100 Subject: uprobes: Move __set_bit(UPROBE_SKIP_SSTEP) into alloc_uprobe() Cosmetic. __set_bit(UPROBE_SKIP_SSTEP) is part of the initialization; it is not clear why it is set in insert_uprobe(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 30ea9a4f4ab4..afbab2cb2742 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -430,9 +430,6 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe) u = __insert_uprobe(uprobe); spin_unlock(&uprobes_treelock); - /* For now assume that the instruction need not be single-stepped */ - __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags); - return u; } @@ -454,6 +451,8 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) uprobe->offset = offset; init_rwsem(&uprobe->consumer_rwsem); mutex_init(&uprobe->copy_mutex); + /* For now assume that the instruction need not be single-stepped */ + __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags); /* add to uprobes_tree, sorted on inode:offset */ cur_uprobe = insert_uprobe(uprobe); -- cgit v1.2.3-59-g8ed1b From f0744af7d0fde190674064c54e2ff60b34ac71fe Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 21 Nov 2012 18:01:43 +0100 Subject: uprobes: Kill the pointless inode/uc checks in register/unregister register/unregister verifies that inode/uc != NULL. For what? This really looks like "hide the potential problem"; the caller should pass valid data. register() also checks uc->next == NULL, probably to prevent the double-register, but the caller can do other stupid/wrong things. If we do this check, then we should document that uc->next should be cleared before register() and add BUG_ON(). Also add the small comment about the i_size_read() check.
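The i_size_read() check can only ever be advisory: nothing pins the file size between the check and the breakpoint insertion, so the comparison may pass and be stale a moment later. A sketch of the window, condensed from the code below for illustration (not a new change):

    if (offset > i_size_read(inode))            /* size sampled here ...     */
        return -EINVAL;
    /* ... another task may truncate or extend the file here ...             */
    ret = register_for_each_vma(uprobe, true);  /* ... and relied upon here  */

Hence the new comment marks the check as racy, good only for catching the obvious mistakes.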
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index afbab2cb2742..a39d8163b713 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -844,9 +844,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * struct uprobe *uprobe; int ret; - if (!inode || !uc || uc->next) - return -EINVAL; - + /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) return -EINVAL; @@ -883,9 +881,6 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume { struct uprobe *uprobe; - if (!inode || !uc) - return; - uprobe = find_uprobe(inode, offset); if (!uprobe) return; -- cgit v1.2.3-59-g8ed1b From fe20d71f25400cccc8bffef865f79250be7dbc81 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 21 Nov 2012 17:32:30 +0100 Subject: uprobes: Kill uprobe_consumer->filter() uprobe_consumer->filter() is pointless in its current form, kill it. We will add it back, but with a different signature/semantics. Perhaps we will even re-introduce the callsite in handler_chain(), but not to just skip uc->handler(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- include/linux/uprobes.h | 5 ----- kernel/events/uprobes.c | 6 ++---- kernel/trace/trace_uprobe.c | 1 - 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 4f628a6fc5b4..83742b91ff73 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -37,11 +37,6 @@ struct inode; struct uprobe_consumer { int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); - /* - * filter is optional; If a filter exists, handler is run - * if and only if filter returns true. - */ - bool (*filter)(struct uprobe_consumer *self, struct task_struct *task); struct uprobe_consumer *next; }; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index a39d8163b713..5cbebac27c01 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -477,10 +477,8 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) return; down_read(&uprobe->consumer_rwsem); - for (uc = uprobe->consumers; uc; uc = uc->next) { - if (!uc->filter || uc->filter(uc, current)) - uc->handler(uc, regs); - } + for (uc = uprobe->consumers; uc; uc = uc->next) + uc->handler(uc, regs); up_read(&uprobe->consumer_rwsem); } diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 87b6db4ccbc5..e668024773d4 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -550,7 +550,6 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) return -EINTR; utc->cons.handler = uprobe_dispatcher; - utc->cons.filter = NULL; ret = uprobe_register(tu->inode, tu->offset, &utc->cons); if (ret) { kfree(utc); -- cgit v1.2.3-59-g8ed1b From 63633cbf82840d972248f11d2122b261d0d4779a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 22 Nov 2012 18:30:15 +0100 Subject: uprobes: Introduce filter_chain() Add the new helper filter_chain(). Currently it is only a placeholder; the comment explains what it should do. We will change it later to consult every consumer to decide whether we need to install the swbp. Until then it works as if any consumer returns true, which matches the current behavior. Change install_breakpoint() to call filter_chain() instead of checking uprobe->consumers != NULL.
We obviously need this, and this equally closes the race with _unregister(). Change remove_breakpoint() to call this helper too. Currently this is pointless because remove_breakpoint() is only called when the last consumer goes away, but we will change this. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 5cbebac27c01..c38bf37d0aca 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -614,6 +614,18 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, return ret; } +static bool filter_chain(struct uprobe *uprobe) +{ + /* + * TODO: + * for_each_consumer(uc) + * if (uc->filter(...)) + * return true; + * return false; + */ + return uprobe->consumers != NULL; +} + static int install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr) @@ -624,11 +636,10 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, /* * If probe is being deleted, unregister thread could be done with * the vma-rmap-walk through. Adding a probe now can be fatal since - * nobody will be able to cleanup. Also we could be from fork or - * mremap path, where the probe might have already been inserted. - * Hence behave as if probe already existed. + * nobody will be able to cleanup. But in this case filter_chain() + * must return false, all consumers have gone away. */ - if (!uprobe->consumers) + if (!filter_chain(uprobe)) return 0; ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); @@ -655,10 +666,12 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, static int remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) { - /* can happen if uprobe_register() fails */ if (!test_bit(MMF_HAS_UPROBES, &mm->flags)) return 0; + if (filter_chain(uprobe)) + return 0; + set_bit(MMF_RECALC_UPROBES, &mm->flags); return set_orig_insn(&uprobe->arch, mm, vaddr); } @@ -1382,6 +1395,7 @@ static void mmf_recalc_uprobes(struct mm_struct *mm) * This is not strictly accurate, we can race with * uprobe_unregister() and see the already removed * uprobe if delete_uprobe() was not yet called. + * Or this uprobe can be filtered out. */ if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) return; -- cgit v1.2.3-59-g8ed1b From 04aab9b2006bbdeff78dc162f206fdfebeca97d9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 23 Nov 2012 19:43:50 +0100 Subject: uprobes: _unregister() should always do register_for_each_vma(false) uprobe_unregister() removes the breakpoints only if the last consumer goes away. To support the filtering it should do this every time, we want to remove the breakpoints which nobody else want to keep. Note: given that filter_chain() is not actually implemented, this patch itself doesn't change the behaviour yet, register_for_each_vma(false) is a heavy "nop" unless there are no more consumers. 
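To see why the removal pass must now run on every unregister, not just the last one, consider two consumers on the same uprobe once per-consumer filters exist; a hypothetical scenario, sketched in comment form:

    /*
     * Consumers A and B are registered on the same uprobe.
     * For some mm: A->filter(mm) == true, B->filter(mm) == false.
     *
     *     uprobe_unregister(inode, offset, A);
     *
     * B is still on the ->consumers list, but filter_chain() for that
     * mm is now false. Only an unconditional register_for_each_vma(false)
     * re-walk will notice this and do remove_breakpoint() there.
     */

With filter_chain() still answering "any consumer", this pass is the heavy nop the note above describes.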
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c38bf37d0aca..940199084639 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -825,12 +825,20 @@ static int __uprobe_register(struct uprobe *uprobe) return register_for_each_vma(uprobe, true); } -static void __uprobe_unregister(struct uprobe *uprobe) +static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) { - if (!register_for_each_vma(uprobe, false)) - delete_uprobe(uprobe); + int err; + + if (!consumer_del(uprobe, uc)) /* WARN? */ + return; - /* TODO : cant unregister? schedule a worker thread */ + err = register_for_each_vma(uprobe, false); + if (!uprobe->consumers) { + clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags); + /* TODO : cant unregister? schedule a worker thread */ + if (!err) + delete_uprobe(uprobe); + } } /* @@ -868,8 +876,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * } else if (!consumer_add(uprobe, uc)) { ret = __uprobe_register(uprobe); if (ret) { - uprobe->consumers = NULL; - __uprobe_unregister(uprobe); + __uprobe_unregister(uprobe, uc); } else { set_bit(UPROBE_RUN_HANDLER, &uprobe->flags); } @@ -897,14 +904,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume return; mutex_lock(uprobes_hash(inode)); - - if (consumer_del(uprobe, uc)) { - if (!uprobe->consumers) { - __uprobe_unregister(uprobe); - clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags); - } - } - + __uprobe_unregister(uprobe, uc); mutex_unlock(uprobes_hash(inode)); put_uprobe(uprobe); } -- cgit v1.2.3-59-g8ed1b From 9a98e03cc145c994da824dac7602334f50feb670 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 23 Nov 2012 20:15:17 +0100 Subject: uprobes: _register() should always do register_for_each_vma(true) To support the filtering, uprobe_register() should do register_for_each_vma(true) every time a new consumer comes: we need to install the previously nacked breakpoints. Note: - uprobes_mutex[] should die, what it actually protects is alloc_uprobe(). - UPROBE_RUN_HANDLER should die too, obviously it can't work unless uprobe has a single consumer. The consumer should serialize with _register/_unregister itself. Or this flag should live in uprobe_consumer->state. - Perhaps we can do some optimizations later. For example, if filter_chain() never returns false uprobe can record this fact and avoid the unnecessary register_for_each_vma().
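The registration path thus becomes "add the consumer first, then re-walk all vmas, rolling back on failure"; condensed from the patch below, with error paths and locking elided:

    static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
    {
        consumer_add(uprobe, uc);                   /* new consumer is visible first */
        return register_for_each_vma(uprobe, true); /* install previously nacked bps */
    }

    /* the caller, on failure, runs __uprobe_unregister(uprobe, uc),
     * which undoes both steps: consumer_del() plus a removal re-walk */
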
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 940199084639..d1d1394bca8b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -482,16 +482,12 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) up_read(&uprobe->consumer_rwsem); } -/* Returns the previous consumer */ -static struct uprobe_consumer * -consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) +static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) { down_write(&uprobe->consumer_rwsem); uc->next = uprobe->consumers; uprobe->consumers = uc; up_write(&uprobe->consumer_rwsem); - - return uc->next; } /* @@ -820,9 +816,15 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) return err; } -static int __uprobe_register(struct uprobe *uprobe) +static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc) { - return register_for_each_vma(uprobe, true); + int err; + + consumer_add(uprobe, uc); + err = register_for_each_vma(uprobe, true); + if (!err) /* TODO: pointless unless the first consumer */ + set_bit(UPROBE_RUN_HANDLER, &uprobe->flags); + return err; } static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) @@ -867,21 +869,14 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (offset > i_size_read(inode)) return -EINVAL; - ret = 0; + ret = -ENOMEM; mutex_lock(uprobes_hash(inode)); uprobe = alloc_uprobe(inode, offset); - - if (!uprobe) { - ret = -ENOMEM; - } else if (!consumer_add(uprobe, uc)) { - ret = __uprobe_register(uprobe); - if (ret) { + if (uprobe) { + ret = __uprobe_register(uprobe, uc); + if (ret) __uprobe_unregister(uprobe, uc); - } else { - set_bit(UPROBE_RUN_HANDLER, &uprobe->flags); - } } - mutex_unlock(uprobes_hash(inode)); if (uprobe) put_uprobe(uprobe); -- cgit v1.2.3-59-g8ed1b From e591c8d78e49e6206935cf31c4d2b603bbb29166 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 24 Nov 2012 17:29:40 +0100 Subject: uprobes: Introduce uprobe->register_rwsem Introduce uprobe->register_rwsem. It is taken for writing around __uprobe_register/unregister. Change handler_chain() to use this sem rather than consumer_rwsem. The main reason for this change is that we have the nasty problem with mmap_sem/consumer_rwsem dependency. filter_chain() needs to protect uprobe->consumers like handler_chain(), but they can not use the same lock. filter_chain() can be called under ->mmap_sem (currently this is always true), but we want to allow ->handler() to play with the probed task's memory, and this needs ->mmap_sem. Alternatively we could use srcu, but synchronize_srcu() is very slow and ->register_rwsem allows us to do more. In particular, we can teach handler_chain() to do remove_breakpoint() if this bp is "nacked" by all consumers, we know that we can't race with the new consumer which does uprobe_register(). See also the next patches. uprobes_mutex[] is almost ready to die. 
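The resulting lock ordering, summarized as an editorial sketch of this series (assuming the filter_chain() locking added two patches later; this comment is not from the tree):

    /*
     *  uprobe_register/unregister:
     *      register_rwsem (write)
     *        -> consumer_rwsem       (consumer_add/consumer_del)
     *        -> mmap_sem             (register_for_each_vma)
     *             -> consumer_rwsem  (filter_chain, once implemented)
     *
     *  breakpoint hit (handler_chain):
     *      register_rwsem (read)
     */

Since consumer_rwsem now nests inside mmap_sem and handler_chain() no longer takes it, filter_chain() is free to take consumer_rwsem while mmap_sem is held.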
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d1d1394bca8b..61d0fa6b5012 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -91,6 +91,7 @@ static atomic_t uprobe_events = ATOMIC_INIT(0); struct uprobe { struct rb_node rb_node; /* node in the rb tree */ atomic_t ref; + struct rw_semaphore register_rwsem; struct rw_semaphore consumer_rwsem; struct mutex copy_mutex; /* TODO: kill me and UPROBE_COPY_INSN */ struct list_head pending_list; @@ -449,6 +450,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) uprobe->inode = igrab(inode); uprobe->offset = offset; + init_rwsem(&uprobe->register_rwsem); init_rwsem(&uprobe->consumer_rwsem); mutex_init(&uprobe->copy_mutex); /* For now assume that the instruction need not be single-stepped */ @@ -476,10 +478,10 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags)) return; - down_read(&uprobe->consumer_rwsem); + down_read(&uprobe->register_rwsem); for (uc = uprobe->consumers; uc; uc = uc->next) uc->handler(uc, regs); - up_read(&uprobe->consumer_rwsem); + up_read(&uprobe->register_rwsem); } static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) @@ -873,9 +875,11 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * mutex_lock(uprobes_hash(inode)); uprobe = alloc_uprobe(inode, offset); if (uprobe) { + down_write(&uprobe->register_rwsem); ret = __uprobe_register(uprobe, uc); if (ret) __uprobe_unregister(uprobe, uc); + up_write(&uprobe->register_rwsem); } mutex_unlock(uprobes_hash(inode)); if (uprobe) put_uprobe(uprobe); -- cgit v1.2.3-59-g8ed1b From 1ff6fee5e62c57d5923b805bb4206acb7953f16e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 24 Nov 2012 18:15:46 +0100 Subject: uprobes: Change filter_chain() to iterate ->consumers list Now that it is safe to use ->consumer_rwsem under ->mmap_sem we can almost finish the implementation of filter_chain(). It still lacks the actual uc->filter(...) call but otherwise it is ready; it just pretends that ->filter() always returns true. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 61d0fa6b5012..4d0452363686 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -614,14 +614,19 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, static bool filter_chain(struct uprobe *uprobe) { - /* - * TODO: - * for_each_consumer(uc) - * if (uc->filter(...)) - * return true; - * return false; - */ - return uprobe->consumers != NULL; + struct uprobe_consumer *uc; + bool ret = false; + + down_read(&uprobe->consumer_rwsem); + for (uc = uprobe->consumers; uc; uc = uc->next) { + /* TODO: ret = uc->filter(...)
*/ + ret = true; + if (ret) + break; + } + up_read(&uprobe->consumer_rwsem); + + return ret; } static int -- cgit v1.2.3-59-g8ed1b From bb929284be40cbbdb347690742557d708fd504a9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 24 Nov 2012 18:27:08 +0100 Subject: uprobes: Kill UPROBE_RUN_HANDLER flag Simply remove UPROBE_RUN_HANDLER and the corresponding code. It can only help if uprobe has a single consumer, and in fact it is no longer needed after handler_chain() was changed to use ->register_rwsem, we simply can not race with uprobe_register(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 4d0452363686..7ec2eb278634 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -83,10 +83,8 @@ static atomic_t uprobe_events = ATOMIC_INIT(0); /* Have a copy of original instruction */ #define UPROBE_COPY_INSN 0 -/* Dont run handlers when first register/ last unregister in progress*/ -#define UPROBE_RUN_HANDLER 1 /* Can skip singlestep */ -#define UPROBE_SKIP_SSTEP 2 +#define UPROBE_SKIP_SSTEP 1 struct uprobe { struct rb_node rb_node; /* node in the rb tree */ @@ -475,9 +473,6 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) { struct uprobe_consumer *uc; - if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags)) - return; - down_read(&uprobe->register_rwsem); for (uc = uprobe->consumers; uc; uc = uc->next) uc->handler(uc, regs); @@ -825,13 +820,8 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc) { - int err; - consumer_add(uprobe, uc); - err = register_for_each_vma(uprobe, true); - if (!err) /* TODO: pointless unless the first consumer */ - set_bit(UPROBE_RUN_HANDLER, &uprobe->flags); - return err; + return register_for_each_vma(uprobe, true); } static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) @@ -842,12 +832,9 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u return; err = register_for_each_vma(uprobe, false); - if (!uprobe->consumers) { - clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags); - /* TODO : cant unregister? schedule a worker thread */ - if (!err) - delete_uprobe(uprobe); - } + /* TODO : cant unregister? schedule a worker thread */ + if (!uprobe->consumers && !err) + delete_uprobe(uprobe); } /* -- cgit v1.2.3-59-g8ed1b From d4d3ccc6d1eb74bd315d49a3829c5ad6c48d21b0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 24 Nov 2012 18:51:34 +0100 Subject: uprobes: Kill uprobe->copy_mutex Now that ->register_rwsem is safe under ->mmap_sem we can kill ->copy_mutex and abuse down_write(&uprobe->consumer_rwsem). This makes prepare_uprobe() even more ugly, but we should kill it anyway. 
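For reference, the resulting prepare_uprobe() keeps the usual double-checked pattern, roughly (a sketch of the code after this patch, with the copy/analysis details elided):

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;			/* fast path, lockless */

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;			/* lost the race, already copied */

	/* copy_insn() + arch_uprobe_analyze_insn() ... */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 out:
	up_write(&uprobe->consumer_rwsem);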
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 7ec2eb278634..40ced98b2bc8 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -91,7 +91,6 @@ struct uprobe { atomic_t ref; struct rw_semaphore register_rwsem; struct rw_semaphore consumer_rwsem; - struct mutex copy_mutex; /* TODO: kill me and UPROBE_COPY_INSN */ struct list_head pending_list; struct uprobe_consumer *consumers; struct inode *inode; /* Also hold a ref to inode */ @@ -450,7 +449,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) uprobe->offset = offset; init_rwsem(&uprobe->register_rwsem); init_rwsem(&uprobe->consumer_rwsem); - mutex_init(&uprobe->copy_mutex); /* For now assume that the instruction need not be single-stepped */ __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags); @@ -578,7 +576,8 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) return ret; - mutex_lock(&uprobe->copy_mutex); + /* TODO: move this into _register, until then we abuse this sem. */ + down_write(&uprobe->consumer_rwsem); if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) goto out; @@ -602,7 +601,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, set_bit(UPROBE_COPY_INSN, &uprobe->flags); out: - mutex_unlock(&uprobe->copy_mutex); + up_write(&uprobe->consumer_rwsem); return ret; } -- cgit v1.2.3-59-g8ed1b From 441f1eb7db8babe2b6b4bc805f023739dbb70e33 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 25 Nov 2012 19:54:29 +0100 Subject: uprobes: Kill uprobe_events, use RB_EMPTY_ROOT() instead uprobe_events counts the number of uprobes in uprobes_tree but it is used as a boolean. We can use RB_EMPTY_ROOT() instead. Probably no_uprobe_events() added by this patch can have more callers, say, mmf_recalc_uprobes(). Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 40ced98b2bc8..5d38b40644b8 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -41,6 +41,11 @@ #define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE static struct rb_root uprobes_tree = RB_ROOT; +/* + * allows us to skip the uprobe_mmap if there are no uprobe events active + * at this time. Probably a fine grained per inode count is better? + */ +#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree) static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */ @@ -74,13 +79,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; static struct percpu_rw_semaphore dup_mmap_sem; -/* - * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe - * events active at this time. Probably a fine grained per inode count is - * better? 
- */ -static atomic_t uprobe_events = ATOMIC_INIT(0); - /* Have a copy of original instruction */ #define UPROBE_COPY_INSN 0 /* Can skip singlestep */ @@ -460,8 +458,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) kfree(uprobe); uprobe = cur_uprobe; iput(inode); - } else { - atomic_inc(&uprobe_events); } return uprobe; @@ -685,7 +681,6 @@ static void delete_uprobe(struct uprobe *uprobe) spin_unlock(&uprobes_treelock); iput(uprobe->inode); put_uprobe(uprobe); - atomic_dec(&uprobe_events); } struct map_info { @@ -975,7 +970,7 @@ int uprobe_mmap(struct vm_area_struct *vma) struct uprobe *uprobe, *u; struct inode *inode; - if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) + if (no_uprobe_events() || !valid_vma(vma, true)) return 0; inode = vma->vm_file->f_mapping->host; @@ -1021,7 +1016,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e */ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) + if (no_uprobe_events() || !valid_vma(vma, false)) return; if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ -- cgit v1.2.3-59-g8ed1b From 06b7bcd8cbd7eb1af331e437ec3d8f5182ae1b7e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 25 Nov 2012 22:01:42 +0100 Subject: uprobes: Introduce uprobe_is_active() The lifetime of uprobe->rb_node and uprobe->inode is not refcounted, delete_uprobe() is called when we detect that uprobe has no consumers, and it would be deadly wrong to do this twice. Change delete_uprobe() to WARN() if it was already called. We use RB_CLEAR_NODE() to mark uprobe "inactive", then RB_EMPTY_NODE() can be used to detect this case. RB_EMPTY_NODE() is not used directly, we add the trivial helper for the next change. Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 5d38b40644b8..358baddc8ac2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -669,6 +669,10 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad return set_orig_insn(&uprobe->arch, mm, vaddr); } +static inline bool uprobe_is_active(struct uprobe *uprobe) +{ + return !RB_EMPTY_NODE(&uprobe->rb_node); +} /* * There could be threads that have already hit the breakpoint. They * will recheck the current insn and restart if find_uprobe() fails. @@ -676,9 +680,13 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad */ static void delete_uprobe(struct uprobe *uprobe) { + if (WARN_ON(!uprobe_is_active(uprobe))) + return; + spin_lock(&uprobes_treelock); rb_erase(&uprobe->rb_node, &uprobes_tree); spin_unlock(&uprobes_treelock); + RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ iput(uprobe->inode); put_uprobe(uprobe); } -- cgit v1.2.3-59-g8ed1b From 66d06dffa5ef6f3544997440af63a91ef36a2171 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 25 Nov 2012 22:48:37 +0100 Subject: uprobes: Kill uprobes_mutex[], separate alloc_uprobe() and __uprobe_register() uprobe_register() and uprobe_unregister() are the only users of mutex_lock(uprobes_hash(inode)), and the only reason why we can't simply remove it is that we need to ensure that delete_uprobe() is not possible after alloc_uprobe() and before consumer_add(). 
IOW, we need to ensure that when we take uprobe->register_rwsem this uprobe is still valid and we didn't race with _unregister() which called delete_uprobe() in between. With this patch uprobe_register() simply checks uprobe_is_active() and retries if it hits this very unlikely race. uprobes_mutex[] is no longer needed and can be removed. There is another reason for this change, prepare_uprobe() should be folded into alloc_uprobe() and we do not want to hold the extra locks around read_mapping_page/etc. Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 51 +++++++++++++++---------------------------------- 1 file changed, 15 insertions(+), 36 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 358baddc8ac2..c3b65d1c8443 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -50,29 +50,6 @@ static struct rb_root uprobes_tree = RB_ROOT; static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */ #define UPROBES_HASH_SZ 13 - -/* - * We need separate register/unregister and mmap/munmap lock hashes because - * of mmap_sem nesting. - * - * uprobe_register() needs to install probes on (potentially) all processes - * and thus needs to acquire multiple mmap_sems (consequtively, not - * concurrently), whereas uprobe_mmap() is called while holding mmap_sem - * for the particular process doing the mmap. - * - * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem - * because of lock order against i_mmap_mutex. This means there's a hole in - * the register vma iteration where a mmap() can happen. - * - * Thus uprobe_register() can race with uprobe_mmap() and we can try and - * install a probe where one is already installed. - */ - -/* serialize (un)register */ -static struct mutex uprobes_mutex[UPROBES_HASH_SZ]; - -#define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ]) - /* serialize uprobe->pending_list */ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; #define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ]) @@ -865,20 +842,26 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (offset > i_size_read(inode)) return -EINVAL; - ret = -ENOMEM; - mutex_lock(uprobes_hash(inode)); + retry: uprobe = alloc_uprobe(inode, offset); - if (uprobe) { - down_write(&uprobe->register_rwsem); + if (!uprobe) + return -ENOMEM; + /* + * We can race with uprobe_unregister()->delete_uprobe(). + * Check uprobe_is_active() and retry if it is false. 
+ */ + down_write(&uprobe->register_rwsem); + ret = -EAGAIN; + if (likely(uprobe_is_active(uprobe))) { ret = __uprobe_register(uprobe, uc); if (ret) __uprobe_unregister(uprobe, uc); - } + up_write(&uprobe->register_rwsem); + put_uprobe(uprobe); + if (unlikely(ret == -EAGAIN)) + goto retry; return ret; } @@ -896,11 +879,9 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume if (!uprobe) return; - mutex_lock(uprobes_hash(inode)); down_write(&uprobe->register_rwsem); __uprobe_unregister(uprobe, uc); up_write(&uprobe->register_rwsem); - mutex_unlock(uprobes_hash(inode)); put_uprobe(uprobe); } @@ -1609,10 +1590,8 @@ static int __init init_uprobes(void) { int i; - for (i = 0; i < UPROBES_HASH_SZ; i++) { - mutex_init(&uprobes_mutex[i]); + for (i = 0; i < UPROBES_HASH_SZ; i++) mutex_init(&uprobes_mmap_mutex[i]); - } if (percpu_init_rwsem(&dup_mmap_sem)) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 806a98bdf2a862fef0fc880399d677b35ba525ff Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 27 Dec 2012 18:21:11 +0100 Subject: uprobes: Rationalize the usage of filter_chain() filter_chain() was added into install_breakpoint/remove_breakpoint to simplify the initial changes but this is sub-optimal. This patch shifts the callsite to the callers, register_for_each_vma() and uprobe_mmap(). This way: - It will be easier to add the new arguments. This is the main reason; we can do more optimizations later. - register_for_each_vma(is_register => true) can be optimized; we only need to consult the new consumer. The previous consumers were already asked when they called uprobe_register(). This patch also moves the MMF_HAS_UPROBES check from remove_breakpoint(), which allows us to avoid the potentially costly filter_chain(). Note that register_for_each_vma(is_register => false) doesn't really need to take ->consumer_rwsem, but I don't think it makes sense to optimize this and introduce filter_chain_lockless(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c3b65d1c8443..33912086d54e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -579,6 +579,11 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, return ret; } +static inline bool consumer_filter(struct uprobe_consumer *uc) +{ + return true; /* TODO: !uc->filter || uc->filter(...) */ +} + static bool filter_chain(struct uprobe *uprobe) { struct uprobe_consumer *uc; @@ -586,8 +591,7 @@ static bool filter_chain(struct uprobe *uprobe) down_read(&uprobe->consumer_rwsem); for (uc = uprobe->consumers; uc; uc = uc->next) { - /* TODO: ret = uc->filter(...) */ - ret = true; + ret = consumer_filter(uc); if (ret) break; } @@ -603,15 +607,6 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, bool first_uprobe; int ret; - /* - * If probe is being deleted, unregister thread could be done with - * the vma-rmap-walk through. Adding a probe now can be fatal since - * nobody will be able to cleanup. But in this case filter_chain() - * must return false, all consumers have gone away.
- */ - if (!filter_chain(uprobe)) - return 0; - ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); if (ret) return ret; @@ -636,12 +631,6 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, static int remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) { - if (!test_bit(MMF_HAS_UPROBES, &mm->flags)) - return 0; - - if (filter_chain(uprobe)) - return 0; - set_bit(MMF_RECALC_UPROBES, &mm->flags); return set_orig_insn(&uprobe->arch, mm, vaddr); } @@ -781,10 +770,14 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) vaddr_to_offset(vma, info->vaddr) != uprobe->offset) goto unlock; - if (is_register) - err = install_breakpoint(uprobe, mm, vma, info->vaddr); - else - err |= remove_breakpoint(uprobe, mm, info->vaddr); + if (is_register) { + /* consult only the "caller", new consumer. */ + if (consumer_filter(uprobe->consumers)) + err = install_breakpoint(uprobe, mm, vma, info->vaddr); + } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { + if (!filter_chain(uprobe)) + err |= remove_breakpoint(uprobe, mm, info->vaddr); + } unlock: up_write(&mm->mmap_sem); @@ -968,9 +961,14 @@ int uprobe_mmap(struct vm_area_struct *vma) mutex_lock(uprobes_mmap_hash(inode)); build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); - + /* + * We can race with uprobe_unregister(), this uprobe can be already + * removed. But in this case filter_chain() must return false, all + * consumers have gone away. + */ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { - if (!fatal_signal_pending(current)) { + if (!fatal_signal_pending(current) && + filter_chain(uprobe)) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); } -- cgit v1.2.3-59-g8ed1b From 8a7f2fa0dea3b019500961b86d765e6fdd4bffb2 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 28 Dec 2012 17:58:38 +0100 Subject: uprobes: Reintroduce uprobe_consumer->filter() Finally add uprobe_consumer->filter() and change consumer_filter() to actually call this method. Note that ->filter() accepts mm_struct, not task_struct. Because: 1. We do not have for_each_mm_user(mm, task). 2. Even if we implement for_each_mm_user(), ->filter() can use it itself. 3. It is not clear who will actually need this interface to do the "nontrivial" filtering. Another argument is "enum uprobe_filter_ctx", consumer->filter() can use it to figure out why/where it was called. For example, perhaps we can add UPROBE_FILTER_PRE_REGISTER used by build_map_info() to quickly "nack" the unwanted mm's. In this case consumer should know that it is called under ->i_mmap_mutex. See the previous discussion at http://marc.info/?t=135214229700002 Perhaps we should pass more arguments, vma/vaddr? Note: this patch obviously can't help to filter out the child created by fork(), this will be addressed later. 
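For example, a consumer that only cares about a single mm could supply something like this (hypothetical consumer code; my_filter/my_mm/my_handler are illustrative, only the ->filter signature comes from this patch):

	static bool my_filter(struct uprobe_consumer *self,
			      enum uprobe_filter_ctx ctx, struct mm_struct *mm)
	{
		return mm == my_mm;	/* install/keep bp's only in this mm */
	}

	static struct uprobe_consumer my_consumer = {
		.handler = my_handler,
		.filter  = my_filter,	/* NULL would mean "always true" */
	};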
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- include/linux/uprobes.h | 9 +++++++++ kernel/events/uprobes.c | 18 +++++++++++------- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 83742b91ff73..c2df6934fdc6 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -35,8 +35,17 @@ struct inode; # include #endif +enum uprobe_filter_ctx { + UPROBE_FILTER_REGISTER, + UPROBE_FILTER_UNREGISTER, + UPROBE_FILTER_MMAP, +}; + struct uprobe_consumer { int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); + bool (*filter)(struct uprobe_consumer *self, + enum uprobe_filter_ctx ctx, + struct mm_struct *mm); struct uprobe_consumer *next; }; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 33912086d54e..c2737be3c4b8 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -579,19 +579,21 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, return ret; } -static inline bool consumer_filter(struct uprobe_consumer *uc) +static inline bool consumer_filter(struct uprobe_consumer *uc, + enum uprobe_filter_ctx ctx, struct mm_struct *mm) { - return true; /* TODO: !uc->filter || uc->filter(...) */ + return !uc->filter || uc->filter(uc, ctx, mm); } -static bool filter_chain(struct uprobe *uprobe) +static bool filter_chain(struct uprobe *uprobe, + enum uprobe_filter_ctx ctx, struct mm_struct *mm) { struct uprobe_consumer *uc; bool ret = false; down_read(&uprobe->consumer_rwsem); for (uc = uprobe->consumers; uc; uc = uc->next) { - ret = consumer_filter(uc); + ret = consumer_filter(uc, ctx, mm); if (ret) break; } @@ -772,10 +774,12 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) if (is_register) { /* consult only the "caller", new consumer. */ - if (consumer_filter(uprobe->consumers)) + if (consumer_filter(uprobe->consumers, + UPROBE_FILTER_REGISTER, mm)) err = install_breakpoint(uprobe, mm, vma, info->vaddr); } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { - if (!filter_chain(uprobe)) + if (!filter_chain(uprobe, + UPROBE_FILTER_UNREGISTER, mm)) err |= remove_breakpoint(uprobe, mm, info->vaddr); } @@ -968,7 +972,7 @@ int uprobe_mmap(struct vm_area_struct *vma) */ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { if (!fatal_signal_pending(current) && - filter_chain(uprobe)) { + filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); } -- cgit v1.2.3-59-g8ed1b From da1816b1caeccdff04531e763bb35d7caa3ed19f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 29 Dec 2012 17:49:11 +0100 Subject: uprobes: Teach handler_chain() to filter out the probed task Currently there are 2 problems with pre-filtering: 1. It is not possible to add/remove a task (mm) after uprobe_register() 2. A forked child inherits all breakpoints and uprobe_consumer can not control this. This patch does the first step to improve the filtering. handler_chain() removes the breakpoints installed by this uprobe from current->mm if all handlers return UPROBE_HANDLER_REMOVE. Note that handler_chain() relies on ->register_rwsem to avoid the race with uprobe_register/unregister which can add/del a consumer, or even remove and then insert the new uprobe at the same address. Perhaps we will add uprobe_apply_mm(uprobe, mm, is_register) and teach copy_mm() to do filter(UPROBE_FILTER_FORK), but I think this change makes sense anyway.
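For example, a handler that loses interest in the probed task can now simply do (an illustrative sketch; interested_in() is a made-up predicate):

	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
	{
		if (!interested_in(current))
			return UPROBE_HANDLER_REMOVE;
		/* ... report the hit ... */
		return 0;
	}

If every consumer returns UPROBE_HANDLER_REMOVE, handler_chain() calls unapply_uprobe() to remove the breakpoints from current->mm.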
Note: instead of checking the retcode from uc->handler, we could add uc->filter(UPROBE_FILTER_BPHIT). But I think this is not optimal to call 2 hooks in a row. This buys nothing, and if handler/filter do something nontrivial they will probably do the same work twice. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- include/linux/uprobes.h | 3 +++ kernel/events/uprobes.c | 58 ++++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 51 insertions(+), 10 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index c2df6934fdc6..95d0002efda5 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -35,6 +35,9 @@ struct inode; # include #endif +#define UPROBE_HANDLER_REMOVE 1 +#define UPROBE_HANDLER_MASK 1 + enum uprobe_filter_ctx { UPROBE_FILTER_REGISTER, UPROBE_FILTER_UNREGISTER, diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c2737be3c4b8..04c104ad9522 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -440,16 +440,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) return uprobe; } -static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) -{ - struct uprobe_consumer *uc; - - down_read(&uprobe->register_rwsem); - for (uc = uprobe->consumers; uc; uc = uc->next) - uc->handler(uc, regs); - up_read(&uprobe->register_rwsem); -} - static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) { down_write(&uprobe->consumer_rwsem); @@ -882,6 +872,33 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume put_uprobe(uprobe); } +static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) +{ + struct vm_area_struct *vma; + int err = 0; + + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + unsigned long vaddr; + loff_t offset; + + if (!valid_vma(vma, false) || + vma->vm_file->f_mapping->host != uprobe->inode) + continue; + + offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; + if (uprobe->offset < offset || + uprobe->offset >= offset + vma->vm_end - vma->vm_start) + continue; + + vaddr = offset_to_vaddr(vma, uprobe->offset); + err |= remove_breakpoint(uprobe, mm, vaddr); + } + up_read(&mm->mmap_sem); + + return err; +} + static struct rb_node * find_node_in_range(struct inode *inode, loff_t min, loff_t max) { @@ -1435,6 +1452,27 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) return uprobe; } +static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) +{ + struct uprobe_consumer *uc; + int remove = UPROBE_HANDLER_REMOVE; + + down_read(&uprobe->register_rwsem); + for (uc = uprobe->consumers; uc; uc = uc->next) { + int rc = uc->handler(uc, regs); + + WARN(rc & ~UPROBE_HANDLER_MASK, + "bad rc=0x%x from %pf()\n", rc, uc->handler); + remove &= rc; + } + + if (remove && uprobe->consumers) { + WARN_ON(!uprobe_is_active(uprobe)); + unapply_uprobe(uprobe, current->mm); + } + up_read(&uprobe->register_rwsem); +} + /* * Run handler and ask thread to singlestep. * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. -- cgit v1.2.3-59-g8ed1b From cf31ec3f7fece93f3fce3ee5964e27857141ea47 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 30 Dec 2012 15:21:22 +0100 Subject: uprobes/x86: Change __skip_sstep() to actually skip the whole insn __skip_sstep() doesn't update regs->ip. Currently this is correct but only "by accident" and it doesn't skip the whole insn. Change it to advance ->ip by the length of the detected 0x66*0x90 sequence. 
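For example, for the padded nop "66 66 90" (osp osp nop) the loop finds 0x90 at i == 2, so after this patch:

	regs->ip = uprobe_get_swbp_addr(regs);	/* start of the probed insn */
	regs->ip += i + 1;			/* i == 2: skip all 3 bytes */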
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index c71025b67462..4e33a35d659e 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -680,8 +680,11 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) if (auprobe->insn[i] == 0x66) continue; - if (auprobe->insn[i] == 0x90) + if (auprobe->insn[i] == 0x90) { + regs->ip = uprobe_get_swbp_addr(regs); + regs->ip += i + 1; return true; + } break; } -- cgit v1.2.3-59-g8ed1b From 74e59dfc6b19e3472a7c16ad57bc831e6e647895 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 30 Dec 2012 15:54:08 +0100 Subject: uprobes: Change handle_swbp() to expose bp_vaddr to handler_chain() Change handle_swbp() to set regs->ip = bp_vaddr in advance, this is what consumer->handler() needs but uprobe_get_swbp_addr() is not exported. This also simplifies the code and makes it more consistent across the supported architectures. handle_swbp() becomes the only caller of uprobe_get_swbp_addr(). Signed-off-by: Oleg Nesterov Acked-by: Ananth N Mavinakayanahalli --- arch/x86/kernel/uprobes.c | 1 - kernel/events/uprobes.c | 15 +++++++-------- kernel/trace/trace_uprobe.c | 4 ++-- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 4e33a35d659e..0ba4cfb4f412 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -681,7 +681,6 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) continue; if (auprobe->insn[i] == 0x90) { - regs->ip = uprobe_get_swbp_addr(regs); regs->ip += i + 1; return true; } diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 04c104ad9522..f1b807831fc2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1504,6 +1504,10 @@ static void handle_swbp(struct pt_regs *regs) } return; } + + /* change it in advance for ->handler() and restart */ + instruction_pointer_set(regs, bp_vaddr); + /* * TODO: move copy_insn/etc into _register and remove this hack. * After we hit the bp, _unregister + _register can install the @@ -1511,14 +1515,14 @@ static void handle_swbp(struct pt_regs *regs) */ smp_rmb(); /* pairs with wmb() in install_breakpoint() */ if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) - goto restart; + goto out; utask = current->utask; if (!utask) { utask = add_utask(); /* Cannot allocate; re-execute the instruction. */ if (!utask) - goto restart; + goto out; } handler_chain(uprobe, regs); @@ -1531,12 +1535,7 @@ static void handle_swbp(struct pt_regs *regs) return; } -restart: - /* - * cannot singlestep; cannot skip instruction; - * re-execute the instruction. 
- */ - instruction_pointer_set(regs, bp_vaddr); + /* can_skip_sstep() succeeded, or restart if can't singlestep */ out: put_uprobe(uprobe); } diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e668024773d4..17d9b2bcc28d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -492,7 +492,7 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) return; entry = ring_buffer_event_data(event); - entry->ip = uprobe_get_swbp_addr(task_pt_regs(current)); + entry->ip = instruction_pointer(task_pt_regs(current)); data = (u8 *)&entry[1]; for (i = 0; i < tu->nr_args; i++) call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); @@ -667,7 +667,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) if (!entry) goto out; - entry->ip = uprobe_get_swbp_addr(task_pt_regs(current)); + entry->ip = instruction_pointer(task_pt_regs(current)); data = (u8 *)&entry[1]; for (i = 0; i < tu->nr_args; i++) call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); -- cgit v1.2.3-59-g8ed1b From c8a82538001e1a68f4a319d5a75de90d1f284731 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 30 Dec 2012 17:40:39 +0100 Subject: uprobes: Move alloc_page() from xol_add_vma() to xol_alloc_area() Move alloc_page() from xol_add_vma() to xol_alloc_area() to cleanup the code. This separates the memory allocations and consolidates the -EALREADY cleanups and the error handling. Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index f1b807831fc2..ea2e2a85479a 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1041,22 +1041,14 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon /* Slot allocation for XOL */ static int xol_add_vma(struct xol_area *area) { - struct mm_struct *mm; - int ret; - - area->page = alloc_page(GFP_HIGHUSER); - if (!area->page) - return -ENOMEM; - - ret = -EALREADY; - mm = current->mm; + struct mm_struct *mm = current->mm; + int ret = -EALREADY; down_write(&mm->mmap_sem); if (mm->uprobes_state.xol_area) goto fail; ret = -ENOMEM; - /* Try to map as high as possible, this is only a hint. 
*/ area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0); if (area->vaddr & ~PAGE_MASK) { @@ -1072,11 +1064,8 @@ static int xol_add_vma(struct xol_area *area) smp_wmb(); /* pairs with get_xol_area() */ mm->uprobes_state.xol_area = area; ret = 0; - -fail: + fail: up_write(&mm->mmap_sem); - if (ret) - __free_page(area->page); return ret; } @@ -1104,21 +1093,26 @@ static struct xol_area *xol_alloc_area(void) area = kzalloc(sizeof(*area), GFP_KERNEL); if (unlikely(!area)) - return NULL; + goto out; area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL); - if (!area->bitmap) - goto fail; + goto free_area; + + area->page = alloc_page(GFP_HIGHUSER); + if (!area->page) + goto free_bitmap; init_waitqueue_head(&area->wq); if (!xol_add_vma(area)) return area; -fail: + __free_page(area->page); + free_bitmap: kfree(area->bitmap); + free_area: kfree(area); - + out: return get_xol_area(current->mm); } -- cgit v1.2.3-59-g8ed1b From 9b545df809644912552360054c7bbe8b8a9e01fa Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 31 Dec 2012 16:39:49 +0100 Subject: uprobes: Fold xol_alloc_area() into get_xol_area() Currently only xol_get_insn_slot() does get_xol_area() + xol_alloc_area(), but this will have more users and we do not want to copy-and-paste this code. This patch simply moves xol_alloc_area() into get_xol_area() to simplify the current and future code. Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index ea2e2a85479a..7e3e5c5b0d88 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1070,27 +1070,21 @@ static int xol_add_vma(struct xol_area *area) return ret; } -static struct xol_area *get_xol_area(struct mm_struct *mm) -{ - struct xol_area *area; - - area = mm->uprobes_state.xol_area; - smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */ - - return area; -} - /* - * xol_alloc_area - Allocate process's xol_area. - * This area will be used for storing instructions for execution out of - * line. + * get_xol_area - Allocate process's xol_area if necessary. + * This area will be used for storing instructions for execution out of line. * * Returns the allocated area or NULL. */ -static struct xol_area *xol_alloc_area(void) +static struct xol_area *get_xol_area(void) { + struct mm_struct *mm = current->mm; struct xol_area *area; + area = mm->uprobes_state.xol_area; + if (area) + goto ret; + area = kzalloc(sizeof(*area), GFP_KERNEL); if (unlikely(!area)) goto out; @@ -1113,7 +1107,10 @@ static struct xol_area *xol_alloc_area(void) free_area: kfree(area); out: - return get_xol_area(current->mm); + area = mm->uprobes_state.xol_area; + ret: + smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */ + return area; } /* @@ -1189,14 +1186,11 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot unsigned long offset; void *vaddr; - area = get_xol_area(current->mm); - if (!area) { - area = xol_alloc_area(); - if (!area) - return 0; - } - current->utask->xol_vaddr = xol_take_insn_slot(area); + area = get_xol_area(); + if (!area) + return 0; + current->utask->xol_vaddr = xol_take_insn_slot(area); /* * Initialize the slot if xol_vaddr points to valid * instruction slot. 
-- cgit v1.2.3-59-g8ed1b From 5a2df662aafdabffb2cf3adb780a5adf66dfb3bc Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 31 Dec 2012 17:03:32 +0100 Subject: uprobes: Turn add_utask() into get_utask() Rename add_utask() into get_utask() and change it to allocate on demand to simplify the caller. Like get_xol_area() it will have more users. Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 7e3e5c5b0d88..16e54d63a9fd 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1290,23 +1290,18 @@ void uprobe_copy_process(struct task_struct *t) } /* - * Allocate a uprobe_task object for the task. - * Called when the thread hits a breakpoint for the first time. + * Allocate a uprobe_task object for the task if if necessary. + * Called when the thread hits a breakpoint. * * Returns: * - pointer to new uprobe_task on success * - NULL otherwise */ -static struct uprobe_task *add_utask(void) +static struct uprobe_task *get_utask(void) { - struct uprobe_task *utask; - - utask = kzalloc(sizeof *utask, GFP_KERNEL); - if (unlikely(!utask)) - return NULL; - - current->utask = utask; - return utask; + if (!current->utask) + current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); + return current->utask; } /* Prepare to single-step probed instruction out of line. */ @@ -1505,13 +1500,9 @@ static void handle_swbp(struct pt_regs *regs) if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) goto out; - utask = current->utask; - if (!utask) { - utask = add_utask(); - /* Cannot allocate; re-execute the instruction. */ - if (!utask) - goto out; - } + utask = get_utask(); + if (!utask) + goto out; /* re-execute the instruction. */ handler_chain(uprobe, regs); if (can_skip_sstep(uprobe, regs)) -- cgit v1.2.3-59-g8ed1b From a6cb3f6d51253e9cf21a38b17c025018117809d7 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 31 Dec 2012 18:00:06 +0100 Subject: uprobes: Do not play with utask in xol_get_insn_slot() pre_ssout()->xol_get_insn_slot() path is confusing and buggy. This patch cleanups the code, the next one fixes the bug. Change xol_get_insn_slot() to only allocate the slot and do nothing more, move the initialization of utask->xol_vaddr/vaddr into pre_ssout(). Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 16e54d63a9fd..8d9c5bcb110e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1176,30 +1176,26 @@ static unsigned long xol_take_insn_slot(struct xol_area *area) } /* - * xol_get_insn_slot - If was not allocated a slot, then - * allocate a slot. + * xol_get_insn_slot - allocate a slot for xol. * Returns the allocated slot address or 0. */ -static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr) +static unsigned long xol_get_insn_slot(struct uprobe *uprobe) { struct xol_area *area; unsigned long offset; + unsigned long xol_vaddr; void *vaddr; area = get_xol_area(); if (!area) return 0; - current->utask->xol_vaddr = xol_take_insn_slot(area); - /* - * Initialize the slot if xol_vaddr points to valid - * instruction slot. 
- */ - if (unlikely(!current->utask->xol_vaddr)) + xol_vaddr = xol_take_insn_slot(area); + if (unlikely(!xol_vaddr)) return 0; - current->utask->vaddr = slot_addr; - offset = current->utask->xol_vaddr & ~PAGE_MASK; + /* Initialize the slot */ + offset = xol_vaddr & ~PAGE_MASK; vaddr = kmap_atomic(area->page); memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES); kunmap_atomic(vaddr); @@ -1209,7 +1205,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot */ flush_dcache_page(area->page); - return current->utask->xol_vaddr; + return xol_vaddr; } /* @@ -1306,12 +1302,21 @@ static struct uprobe_task *get_utask(void) /* Prepare to single-step probed instruction out of line. */ static int -pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr) +pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) { - if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs)) - return 0; + struct uprobe_task *utask; + unsigned long xol_vaddr; + + utask = current->utask; + + xol_vaddr = xol_get_insn_slot(uprobe); + if (!xol_vaddr) + return -ENOMEM; + + utask->xol_vaddr = xol_vaddr; + utask->vaddr = bp_vaddr; - return -EFAULT; + return arch_uprobe_pre_xol(&uprobe->arch, regs); } /* -- cgit v1.2.3-59-g8ed1b From aba51024e7159c93914557caaa2b8cda26331091 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 31 Dec 2012 18:12:48 +0100 Subject: uprobes: Fix utask->xol_vaddr leak in pre_ssout() pre_ssout() should do xol_free_insn_slot() if arch_uprobe_pre_xol() fails, otherwise nobody will free the allocated slot. Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 8d9c5bcb110e..0527379dac5b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1306,6 +1306,7 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) { struct uprobe_task *utask; unsigned long xol_vaddr; + int err; utask = current->utask; @@ -1316,7 +1317,13 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) utask->xol_vaddr = xol_vaddr; utask->vaddr = bp_vaddr; - return arch_uprobe_pre_xol(&uprobe->arch, regs); + err = arch_uprobe_pre_xol(&uprobe->arch, regs); + if (unlikely(err)) { + xol_free_insn_slot(current); + return err; + } + + return 0; } /* -- cgit v1.2.3-59-g8ed1b From 608e7427c0a06de0d70374a9fd7defc8eb228b7e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 31 Dec 2012 18:20:42 +0100 Subject: uprobes: Do not allocate current->utask unnecessary handle_swbp() does get_utask() before can_skip_sstep() for no reason, we do not need ->utask if can_skip_sstep() succeeds. Move get_utask() to pre_ssout() who actually starts to use it. Move the initialization of utask->active_uprobe/state as well. This way the whole initialization is consolidated in pre_ssout(). 
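With this, the tail of handle_swbp() becomes (a sketch of the resulting flow):

	handler_chain(uprobe, regs);
	if (can_skip_sstep(uprobe, regs))
		goto out;		/* skipped: no utask needed at all */

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;			/* utask/active_uprobe set up inside */
	/* can_skip_sstep() succeeded, or restart if can't singlestep */
 out:
	put_uprobe(uprobe);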
Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov --- kernel/events/uprobes.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 0527379dac5b..071edcb3e62d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1308,7 +1308,9 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) unsigned long xol_vaddr; int err; - utask = current->utask; + utask = get_utask(); + if (!utask) + return -ENOMEM; xol_vaddr = xol_get_insn_slot(uprobe); if (!xol_vaddr) @@ -1323,6 +1325,8 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) return err; } + utask->active_uprobe = uprobe; + utask->state = UTASK_SSTEP; return 0; } @@ -1474,7 +1478,6 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) */ static void handle_swbp(struct pt_regs *regs) { - struct uprobe_task *utask; struct uprobe *uprobe; unsigned long bp_vaddr; int uninitialized_var(is_swbp); @@ -1512,19 +1515,12 @@ static void handle_swbp(struct pt_regs *regs) if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) goto out; - utask = get_utask(); - if (!utask) - goto out; /* re-execute the instruction. */ - handler_chain(uprobe, regs); if (can_skip_sstep(uprobe, regs)) goto out; - if (!pre_ssout(uprobe, regs, bp_vaddr)) { - utask->active_uprobe = uprobe; - utask->state = UTASK_SSTEP; + if (!pre_ssout(uprobe, regs, bp_vaddr)) return; - } /* can_skip_sstep() succeeded, or restart if can't singlestep */ out: -- cgit v1.2.3-59-g8ed1b From af4355e91f15812df8608925738c91be57c580dd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 31 Dec 2012 18:37:11 +0100 Subject: uprobes: Kill the bogus IS_ERR_VALUE(xol_vaddr) check utask->xol_vaddr is either zero or valid, remove the bogus IS_ERR_VALUE() check in xol_free_insn_slot(). Signed-off-by: Oleg Nesterov Acked-by: Anton Arapov Acked-by: Srikar Dronamraju --- kernel/events/uprobes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 071edcb3e62d..f6c7062fb950 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1223,8 +1223,7 @@ static void xol_free_insn_slot(struct task_struct *tsk) return; slot_addr = tsk->utask->xol_vaddr; - - if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr))) + if (unlikely(!slot_addr)) return; area = tsk->mm->uprobes_state.xol_area; -- cgit v1.2.3-59-g8ed1b From e8440c1458ba571bc3fac8a6beb53ff604199f5b Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Sun, 13 Jan 2013 19:03:34 +0100 Subject: uprobes: Add exports for module use The original pull message for uprobes (commit 654443e2) noted: This tree includes uprobes support in 'perf probe' - but SystemTap (and other tools) can take advantage of user probe points as well. In order to actually be usable in module-based tools like SystemTap, the interface needs to be exported. This patch first adds the obvious exports for uprobe_register and uprobe_unregister. Then it also adds one for task_user_regset_view, which is necessary to get the correct state of userspace registers. 
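A module can then do, roughly (hypothetical module code; the inode/offset lookup and all error handling are omitted):

	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
	{
		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
		return 0;
	}

	static struct uprobe_consumer my_consumer = { .handler = my_handler };

	/* module_init: */
	err = uprobe_register(inode, offset, &my_consumer);
	/* module_exit: */
	uprobe_unregister(inode, offset, &my_consumer);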
Signed-off-by: Josh Stone Signed-off-by: Oleg Nesterov --- kernel/events/uprobes.c | 3 +++ kernel/ptrace.c | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index f6c7062fb950..221fc58f59e3 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -27,6 +27,7 @@ #include /* read_mapping_page */ #include #include +#include #include /* anon_vma_prepare */ #include /* set_pte_at_notify */ #include /* try_to_free_swap */ @@ -851,6 +852,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * goto retry; return ret; } +EXPORT_SYMBOL_GPL(uprobe_register); /* * uprobe_unregister - unregister a already registered probe. @@ -871,6 +873,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume up_write(&uprobe->register_rwsem); put_uprobe(uprobe); } +EXPORT_SYMBOL_GPL(uprobe_unregister); static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) { diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 6cbeaae4406d..acbd28424d81 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -712,6 +712,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type, kiov->iov_len, kiov->iov_base); } +/* + * This is declared in linux/regset.h and defined in machine-dependent + * code. We put the export here, near the primary machine-neutral use, + * to ensure no machine forgets it. + */ +EXPORT_SYMBOL_GPL(task_user_regset_view); #endif int ptrace_request(struct task_struct *child, long request, -- cgit v1.2.3-59-g8ed1b From 84d7ed799fd6c1366547d88ddb8188c65de3b94f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Jan 2013 18:20:45 +0100 Subject: uprobes/tracing: Fix dentry/mount leak in create_trace_uprobe() create_trace_uprobe() does kern_path() to find ->d_inode, but forgets to do path_put(). We can do this right after igrab(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/trace/trace_uprobe.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 17d9b2bcc28d..06c22bad776a 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -253,16 +253,18 @@ static int create_trace_uprobe(int argc, char **argv) if (ret) goto fail_address_parse; - ret = kstrtoul(arg, 0, &offset); - if (ret) - goto fail_address_parse; - inode = igrab(path.dentry->d_inode); + path_put(&path); + if (!S_ISREG(inode->i_mode)) { ret = -EINVAL; goto fail_address_parse; } + ret = kstrtoul(arg, 0, &offset); + if (ret) + goto fail_address_parse; + argc -= 2; argv += 2; -- cgit v1.2.3-59-g8ed1b From 4161824f18ff4f56f46595a4016c7315dd0d24f1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Jan 2013 18:36:24 +0100 Subject: uprobes/tracing: Fully initialize uprobe_trace_consumer before uprobe_register() probe_event_enable() does uprobe_register() and only after that sets utc->tu and tu->consumer/flags. This can race with uprobe_dispatcher() which can miss these assignments or see them out of order. Nothing really bad can happen, but this doesn't look clean/safe. And this does not allow to use uprobe_consumer->filter() we are going to add, it is called by uprobe_register() and it needs utc->tu. Change this code to initialize everything before uprobe_register(), and reset tu->consumer/flags if it fails. We can't race with event_disable(), the caller holds event_mutex, and if we could the code would be wrong anyway. 
In fact I think uprobe_trace_consumer should die, it buys nothing but complicates the code. We can simply add uprobe_consumer into trace_uprobe. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/trace/trace_uprobe.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 06c22bad776a..15b8eceeddc5 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -552,17 +552,18 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) return -EINTR; utc->cons.handler = uprobe_dispatcher; + utc->tu = tu; + tu->consumer = utc; + tu->flags |= flag; + ret = uprobe_register(tu->inode, tu->offset, &utc->cons); if (ret) { + tu->consumer = NULL; + tu->flags &= ~flag; kfree(utc); - return ret; } - tu->flags |= flag; - utc->tu = tu; - tu->consumer = utc; - - return 0; + return ret; } static void probe_event_disable(struct trace_uprobe *tu, int flag) -- cgit v1.2.3-59-g8ed1b From 7e4e28c53963e6cfa94d8109bb8f5233c5659048 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 28 Jan 2013 17:08:47 +0100 Subject: uprobes/tracing: Ensure inode != NULL in create_trace_uprobe() probe_event_enable/disable() check tu->inode != NULL at the start. This is ugly, if igrab() can fail create_trace_uprobe() should not succeed and "postpone" the failure. And S_ISREG(inode->i_mode) check added by d24d7dbf is not safe. Note: alloc_uprobe() should probably check igrab() != NULL as well. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/trace/trace_uprobe.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 15b8eceeddc5..f7838cfd61b9 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -256,7 +256,7 @@ static int create_trace_uprobe(int argc, char **argv) inode = igrab(path.dentry->d_inode); path_put(&path); - if (!S_ISREG(inode->i_mode)) { + if (!inode || !S_ISREG(inode->i_mode)) { ret = -EINVAL; goto fail_address_parse; } @@ -544,7 +544,7 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) struct uprobe_trace_consumer *utc; int ret = 0; - if (!tu->inode || tu->consumer) + if (tu->consumer) return -EINTR; utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL); @@ -568,7 +568,7 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) static void probe_event_disable(struct trace_uprobe *tu, int flag) { - if (!tu->inode || !tu->consumer) + if (!tu->consumer) return; uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons); -- cgit v1.2.3-59-g8ed1b From b64b007797c1e6d6b745c93c296ba1d5f4d72d86 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 31 Jan 2013 19:15:30 +0100 Subject: uprobes/tracing: Introduce is_trace_uprobe_enabled() probe_event_enable/disable() check tu->consumer != NULL to avoid the wrong uprobe_register/unregister(). We are going to kill this pointer and "struct uprobe_trace_consumer", so we add the new helper, is_trace_uprobe_enabled(), which can rely on TP_FLAG_TRACE/TP_FLAG_PROFILE instead. Note: the current logic doesn't look optimal, it is not clear why TP_FLAG_TRACE/TP_FLAG_PROFILE are mutually exclusive, we will probably change this later. Also kill the unused TP_FLAG_UPROBE. 
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/trace/trace_probe.h | 1 - kernel/trace/trace_uprobe.c | 9 +++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 933708677814..5c7e09d10d74 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -66,7 +66,6 @@ #define TP_FLAG_TRACE 1 #define TP_FLAG_PROFILE 2 #define TP_FLAG_REGISTERED 4 -#define TP_FLAG_UPROBE 8 /* data_rloc: data relative location, compatible with u32 */ diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index f7838cfd61b9..d6c6e2a345a7 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -539,12 +539,17 @@ partial: return TRACE_TYPE_PARTIAL_LINE; } +static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu) +{ + return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE); +} + static int probe_event_enable(struct trace_uprobe *tu, int flag) { struct uprobe_trace_consumer *utc; int ret = 0; - if (tu->consumer) + if (is_trace_uprobe_enabled(tu)) return -EINTR; utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL); @@ -568,7 +573,7 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) static void probe_event_disable(struct trace_uprobe *tu, int flag) { - if (!tu->consumer) + if (!is_trace_uprobe_enabled(tu)) return; uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons); -- cgit v1.2.3-59-g8ed1b From a932b7381f81235530c3d0acbd3ba2c7537d78e5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 31 Jan 2013 19:47:23 +0100 Subject: uprobes/tracing: Kill uprobe_trace_consumer, embed uprobe_consumer into trace_uprobe trace_uprobe->consumer and "struct uprobe_trace_consumer" add the unnecessary indirection and complicate the code for no reason. This patch simply embeds uprobe_consumer into "struct trace_uprobe", all other changes only fix the compilation errors. 
Signed-off-by: Oleg Nesterov --- kernel/trace/trace_uprobe.c | 35 ++++++----------------------------- 1 file changed, 6 insertions(+), 29 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d6c6e2a345a7..9c8babbfd11b 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -31,17 +31,11 @@ /* * uprobe event core functions */ -struct trace_uprobe; -struct uprobe_trace_consumer { - struct uprobe_consumer cons; - struct trace_uprobe *tu; -}; - struct trace_uprobe { struct list_head list; struct ftrace_event_class class; struct ftrace_event_call call; - struct uprobe_trace_consumer *consumer; + struct uprobe_consumer consumer; struct inode *inode; char *filename; unsigned long offset; @@ -92,6 +86,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs) goto error; INIT_LIST_HEAD(&tu->list); + tu->consumer.handler = uprobe_dispatcher; return tu; error: @@ -546,27 +541,15 @@ static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu) static int probe_event_enable(struct trace_uprobe *tu, int flag) { - struct uprobe_trace_consumer *utc; int ret = 0; if (is_trace_uprobe_enabled(tu)) return -EINTR; - utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL); - if (!utc) - return -EINTR; - - utc->cons.handler = uprobe_dispatcher; - utc->tu = tu; - tu->consumer = utc; tu->flags |= flag; - - ret = uprobe_register(tu->inode, tu->offset, &utc->cons); - if (ret) { - tu->consumer = NULL; + ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); + if (ret) tu->flags &= ~flag; - kfree(utc); - } return ret; } @@ -576,10 +559,8 @@ static void probe_event_disable(struct trace_uprobe *tu, int flag) if (!is_trace_uprobe_enabled(tu)) return; - uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons); + uprobe_unregister(tu->inode, tu->offset, &tu->consumer); tu->flags &= ~flag; - kfree(tu->consumer); - tu->consumer = NULL; } static int uprobe_event_define_fields(struct ftrace_event_call *event_call) @@ -717,13 +698,9 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) { - struct uprobe_trace_consumer *utc; struct trace_uprobe *tu; - utc = container_of(con, struct uprobe_trace_consumer, cons); - tu = utc->tu; - if (!tu || tu->consumer != utc) - return 0; + tu = container_of(con, struct trace_uprobe, consumer); if (tu->flags & TP_FLAG_TRACE) uprobe_trace_func(tu, regs); -- cgit v1.2.3-59-g8ed1b From 1b47aefd9b6bd439a4be43c47acd22987ac22db8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 31 Jan 2013 19:55:27 +0100 Subject: uprobes/perf: Always increment trace_uprobe->nhit Move tu->nhit++ from uprobe_trace_func() to uprobe_dispatcher(). ->nhit counts how many times we hit the breakpoint inserted by this uprobe; we do not want to lose this info if uprobe was enabled by sys_perf_event_open().
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- kernel/trace/trace_uprobe.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 9c8babbfd11b..c4e29e19fdd7 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -476,8 +476,6 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) unsigned long irq_flags; struct ftrace_event_call *call = &tu->call; - tu->nhit++; - local_save_flags(irq_flags); pc = preempt_count(); @@ -701,6 +699,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) struct trace_uprobe *tu; tu = container_of(con, struct trace_uprobe, consumer); + tu->nhit++; if (tu->flags & TP_FLAG_TRACE) uprobe_trace_func(tu, regs); -- cgit v1.2.3-59-g8ed1b From f22c1bb6b4706be3502b378cb14564449b15f983 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 2 Feb 2013 16:27:52 +0100 Subject: perf: Introduce hw_perf_event->tp_target and ->tp_list sys_perf_event_open()->perf_init_event(event) is called before find_get_context(event), this means that event->ctx == NULL when class->reg(TRACE_REG_PERF_REGISTER/OPEN) is called and thus it can't know if this event is per-task or system-wide. This patch adds hw_perf_event->tp_target for PERF_TYPE_TRACEPOINT, this is analogous to PERF_TYPE_BREAKPOINT/bp_target we already have. The patch also moves ->bp_target up so that it can overlap with the new member, this can help the compiler to generate the better code. trace_uprobe_register() will use it for prefiltering to avoid the unnecessary breakpoints in mm's we do not want to trace. ->tp_target doesn't have its own reference, but we can rely on the fact that either sys_perf_event_open() holds a reference, or it is equal to event->ctx->task. So this pointer is always valid until free_event(). Also add the "struct list_head tp_list" into this union. It is not strictly necessary, but it can simplify the next changes and we can add it for free. Signed-off-by: Oleg Nesterov --- include/linux/perf_event.h | 9 +++++++-- kernel/events/core.c | 5 ++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 42adf012145d..e47ee462c2f2 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -135,16 +135,21 @@ struct hw_perf_event { struct { /* software */ struct hrtimer hrtimer; }; + struct { /* tracepoint */ + struct task_struct *tp_target; + /* for tp_event->class */ + struct list_head tp_list; + }; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct { /* breakpoint */ - struct arch_hw_breakpoint info; - struct list_head bp_list; /* * Crufty hack to avoid the chicken and egg * problem hw_breakpoint has with context * creation and event initalization. */ struct task_struct *bp_target; + struct arch_hw_breakpoint info; + struct list_head bp_list; }; #endif }; diff --git a/kernel/events/core.c b/kernel/events/core.c index 301079d06f24..e2d4323c6ae6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6162,11 +6162,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, if (task) { event->attach_state = PERF_ATTACH_TASK; + + if (attr->type == PERF_TYPE_TRACEPOINT) + event->hw.tp_target = task; #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * hw_breakpoint is a bit difficult here.. 
*/ - if (attr->type == PERF_TYPE_BREAKPOINT) + else if (attr->type == PERF_TYPE_BREAKPOINT) event->hw.bp_target = task; #endif } -- cgit v1.2.3-59-g8ed1b From bdf8647c44766590ed02f9a84a450a796558b753 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 3 Feb 2013 19:21:12 +0100 Subject: uprobes: Introduce uprobe_apply() Currently it is not possible to change the filtering constraints after uprobe_register(), so a consumer cannot, say, start to trace a task/mm which was previously filtered out, or remove the no longer needed bp's. Introduce uprobe_apply() which simply does register_for_each_vma() again to consult uprobe_consumer->filter() and install/remove the breakpoints. The only complication is that register_for_each_vma() can no longer assume that uprobe->consumers should be consulted if is_register == T, so we change it to accept "struct uprobe_consumer *new" instead. Unlike uprobe_register(), uprobe_apply(true) doesn't do "unregister" if register_for_each_vma() fails; it is up to the caller to handle the error. Note: we probably need to clean up the current interface; it is strange that uprobe_apply/unregister need inode/offset. We should either change uprobe_register() to return "struct uprobe *", or add a private ->uprobe member in uprobe_consumer. And in the long term uprobe_apply() should take a single argument, uprobe or consumer; even "bool add" should go away. Signed-off-by: Oleg Nesterov --- include/linux/uprobes.h | 6 ++++++ kernel/events/uprobes.c | 39 +++++++++++++++++++++++++++++++++++---- 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 95d0002efda5..02b83db8e2c5 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -101,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); +extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -124,6 +125,11 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { return -ENOSYS; } +static inline int +uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) +{ + return -ENOSYS; +} static inline void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 221fc58f59e3..a567c8c7ef31 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -733,8 +733,10 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register) return curr; } -static int register_for_each_vma(struct uprobe *uprobe, bool is_register) +static int +register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) { + bool is_register = !!new; struct map_info *info; int err = 0; @@ -765,7 +767,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) if (is_register) { /* consult only the "caller", new consumer.
*/ - if (consumer_filter(uprobe->consumers, + if (consumer_filter(new, UPROBE_FILTER_REGISTER, mm)) err = install_breakpoint(uprobe, mm, vma, info->vaddr); } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { @@ -788,7 +790,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc) { consumer_add(uprobe, uc); - return register_for_each_vma(uprobe, true); + return register_for_each_vma(uprobe, uc); } static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) @@ -798,7 +800,7 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u if (!consumer_del(uprobe, uc)) /* WARN? */ return; - err = register_for_each_vma(uprobe, false); + err = register_for_each_vma(uprobe, NULL); /* TODO : cant unregister? schedule a worker thread */ if (!uprobe->consumers && !err) delete_uprobe(uprobe); @@ -854,6 +856,35 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * } EXPORT_SYMBOL_GPL(uprobe_register); +/* + * uprobe_apply - add or remove the breakpoints for an already registered probe. + * @inode: the file in which the probe has to be removed. + * @offset: offset from the start of the file. + * @uc: consumer which wants to add more or remove some breakpoints + * @add: add or remove the breakpoints + */ +int uprobe_apply(struct inode *inode, loff_t offset, + struct uprobe_consumer *uc, bool add) +{ + struct uprobe *uprobe; + struct uprobe_consumer *con; + int ret = -ENOENT; + + uprobe = find_uprobe(inode, offset); + if (!uprobe) + return ret; + + down_write(&uprobe->register_rwsem); + for (con = uprobe->consumers; con && con != uc ; con = con->next) + ; + if (con) + ret = register_for_each_vma(uprobe, add ? uc : NULL); + up_write(&uprobe->register_rwsem); + put_uprobe(uprobe); + + return ret; +} + /* * uprobe_unregister - unregister an already registered probe. + @inode: the file in which the probe has to be removed. -- cgit v1.2.3-59-g8ed1b From 736288ba5016e255869c26296014eeff649971c2 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 3 Feb 2013 20:58:35 +0100 Subject: uprobes/perf: Teach trace_uprobe/perf code to track the active perf_event's Introduce "struct trace_uprobe_filter" which records the "active" perf_event's attached to ftrace_event_call. To start with we simply use list_head; we can optimize this later if needed. For example, we do not really need to record an event with ->parent != NULL; we can rely on parent->child_list. And we can certainly do some optimizations for the case when 2 events have the same ->tp_target or tp_target->mm. Change trace_uprobe_register() to process TRACE_REG_PERF_OPEN/CLOSE and add/del this perf_event to the list. We can probably avoid any locking, but let's start with the "obviously correct" trace_uprobe_filter->rwlock which protects everything.
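The locking discipline behind this is the classic rare-writer/hot-reader split: uprobe_perf_open/close take the write lock on perf_event open and close, while the breakpoint-hit and filter paths only ever read. As a preview of the read side that the next patches in the series add (condensed from __uprobe_perf_filter() and uprobe_perf_filter() below; filter_match is an illustrative name):

	static bool filter_match(struct trace_uprobe_filter *filter,
				 struct mm_struct *mm)
	{
		struct perf_event *event;
		bool ret = false;

		read_lock(&filter->rwlock);
		if (filter->nr_systemwide) {
			/* at least one system-wide event: every mm matches */
			ret = true;
		} else {
			list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
				if (event->hw.tp_target->mm == mm) {
					ret = true;
					break;
				}
			}
		}
		read_unlock(&filter->rwlock);

		return ret;
	}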
Signed-off-by: Oleg Nesterov --- kernel/trace/trace_uprobe.c | 55 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c4e29e19fdd7..2a74a93afdae 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -28,6 +28,12 @@ #define UPROBE_EVENT_SYSTEM "uprobes" +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + /* * uprobe event core functions */ @@ -35,6 +41,7 @@ struct trace_uprobe { struct list_head list; struct ftrace_event_class class; struct ftrace_event_call call; + struct trace_uprobe_filter filter; struct uprobe_consumer consumer; struct inode *inode; char *filename; @@ -58,6 +65,18 @@ static LIST_HEAD(uprobe_list); static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); +static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) +{ + rwlock_init(&filter->rwlock); + filter->nr_systemwide = 0; + INIT_LIST_HEAD(&filter->perf_events); +} + +static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter) +{ + return !filter->nr_systemwide && list_empty(&filter->perf_events); +} + /* * Allocate new trace_uprobe and initialize it (including uprobes). */ @@ -87,6 +106,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs) INIT_LIST_HEAD(&tu->list); tu->consumer.handler = uprobe_dispatcher; + init_trace_uprobe_filter(&tu->filter); return tu; error: @@ -544,6 +564,8 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) if (is_trace_uprobe_enabled(tu)) return -EINTR; + WARN_ON(!uprobe_filter_is_empty(&tu->filter)); + tu->flags |= flag; ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); if (ret) @@ -557,6 +579,8 @@ static void probe_event_disable(struct trace_uprobe *tu, int flag) if (!is_trace_uprobe_enabled(tu)) return; + WARN_ON(!uprobe_filter_is_empty(&tu->filter)); + uprobe_unregister(tu->inode, tu->offset, &tu->consumer); tu->flags &= ~flag; } @@ -632,6 +656,30 @@ static int set_print_fmt(struct trace_uprobe *tu) } #ifdef CONFIG_PERF_EVENTS +static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) +{ + write_lock(&tu->filter.rwlock); + if (event->hw.tp_target) + list_add(&event->hw.tp_list, &tu->filter.perf_events); + else + tu->filter.nr_systemwide++; + write_unlock(&tu->filter.rwlock); + + return 0; +} + +static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) +{ + write_lock(&tu->filter.rwlock); + if (event->hw.tp_target) + list_del(&event->hw.tp_list); + else + tu->filter.nr_systemwide--; + write_unlock(&tu->filter.rwlock); + + return 0; +} + /* uprobe profile handler */ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) { @@ -687,6 +735,13 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, case TRACE_REG_PERF_UNREGISTER: probe_event_disable(tu, TP_FLAG_PROFILE); return 0; + + case TRACE_REG_PERF_OPEN: + return uprobe_perf_open(tu, data); + + case TRACE_REG_PERF_CLOSE: + return uprobe_perf_close(tu, data); + #endif default: return 0; -- cgit v1.2.3-59-g8ed1b From 31ba334836c0ac0039084859f14a5b96858493dc Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 4 Feb 2013 17:11:58 +0100 Subject: uprobes/perf: Teach trace_uprobe/perf code to pre-filter Finally implement uprobe_perf_filter() which checks ->nr_systemwide or ->perf_events to figure out whether we need to insert the breakpoint. 
uprobe_perf_open/close are changed to do uprobe_apply(true/false) when the new perf event comes or goes away. Note that currently this is very suboptimal: - uprobe_register() called by TRACE_REG_PERF_REGISTER becomes a heavy nop; consumer->filter() always returns F at this stage. As it was already discussed we need uprobe_register_only() to avoid the costly register_for_each_vma() when possible. - uprobe_apply() is often overkill. Unless "nr_systemwide != 0" changes we need uprobe_apply_mm(); unapply_uprobe() is almost what we need. - uprobe_apply() can be simply avoided sometimes, see the next changes. Testing: # perf probe -x /lib/libc.so.6 syscall # perl -e 'syscall -1 while 1' & [1] 530 # perf record -e probe_libc:syscall perl -e 'syscall -1 for 1..10; sleep 1' # perf report --show-total-period 100.00% 10 perl libc-2.8.so [.] syscall Before this patch: # cat /sys/kernel/debug/tracing/uprobe_profile /lib/libc.so.6 syscall 79291 A huge ->nhit == 79291 reflects the fact that the background process 530 constantly hits this breakpoint too, even if it doesn't contribute to the output. After the patch: # cat /sys/kernel/debug/tracing/uprobe_profile /lib/libc.so.6 syscall 10 This shows that only the target process was punished by int3. Signed-off-by: Oleg Nesterov --- kernel/trace/trace_uprobe.c | 46 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2a74a93afdae..b7850f535acf 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -557,7 +557,12 @@ static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu) return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE); } -static int probe_event_enable(struct trace_uprobe *tu, int flag) +typedef bool (*filter_func_t)(struct uprobe_consumer *self, + enum uprobe_filter_ctx ctx, + struct mm_struct *mm); + +static int +probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) { int ret = 0; @@ -567,6 +572,7 @@ static int probe_event_enable(struct trace_uprobe *tu, int flag) WARN_ON(!uprobe_filter_is_empty(&tu->filter)); tu->flags |= flag; + tu->consumer.filter = filter; ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); if (ret) tu->flags &= ~flag; @@ -656,6 +662,22 @@ static int set_print_fmt(struct trace_uprobe *tu) } #ifdef CONFIG_PERF_EVENTS +static bool +__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) +{ + struct perf_event *event; + + if (filter->nr_systemwide) + return true; + + list_for_each_entry(event, &filter->perf_events, hw.tp_list) { + if (event->hw.tp_target->mm == mm) + return true; + } + + return false; +} + static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) { write_lock(&tu->filter.rwlock); @@ -665,6 +687,8 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) if (event->hw.tp_target) list_add(&event->hw.tp_list, &tu->filter.perf_events); else tu->filter.nr_systemwide++; write_unlock(&tu->filter.rwlock); + uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); + return 0; } @@ -677,9 +701,25 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) tu->filter.nr_systemwide--; write_unlock(&tu->filter.rwlock); + uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); + return 0; } +static bool uprobe_perf_filter(struct uprobe_consumer *uc, + enum uprobe_filter_ctx ctx, struct mm_struct *mm) +{ + struct trace_uprobe *tu; + int ret; + + tu = container_of(uc, struct trace_uprobe, consumer); + read_lock(&tu->filter.rwlock); + ret =
__uprobe_perf_filter(&tu->filter, mm); + read_unlock(&tu->filter.rwlock); + + return ret; +} + /* uprobe profile handler */ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) { @@ -722,7 +762,7 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, switch (type) { case TRACE_REG_REGISTER: - return probe_event_enable(tu, TP_FLAG_TRACE); + return probe_event_enable(tu, TP_FLAG_TRACE, NULL); case TRACE_REG_UNREGISTER: probe_event_disable(tu, TP_FLAG_TRACE); @@ -730,7 +770,7 @@ #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: - return probe_event_enable(tu, TP_FLAG_PROFILE); + return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter); case TRACE_REG_PERF_UNREGISTER: probe_event_disable(tu, TP_FLAG_PROFILE); -- cgit v1.2.3-59-g8ed1b From f42d24a1d20d2e72d1e5d48930f18b138dfad117 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 4 Feb 2013 17:48:34 +0100 Subject: uprobes/perf: Teach trace_uprobe/perf code to use UPROBE_HANDLER_REMOVE Change uprobe_trace_func() and uprobe_perf_func() to return "int". Change uprobe_dispatcher() to return "trace_ret | perf_ret" although this is not needed; currently TP_FLAG_TRACE/TP_FLAG_PROFILE are mutually exclusive. The only functional change is that uprobe_perf_func() checks the filtering too and returns UPROBE_HANDLER_REMOVE if nobody wants to trace current. Testing: # perf probe -x /lib/libc.so.6 syscall # perf record -e probe_libc:syscall -i perl -e 'fork; syscall -1 for 1..10; wait' # perf report --show-total-period 100.00% 10 perl libc-2.8.so [.] syscall Before this patch: # cat /sys/kernel/debug/tracing/uprobe_profile /lib/libc.so.6 syscall 20 A child process doesn't have a counter, but still it hits this breakpoint "copied" by dup_mmap(). After the patch: # cat /sys/kernel/debug/tracing/uprobe_profile /lib/libc.so.6 syscall 11 The child process hits this int3 only once and does unapply_uprobe().
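The return-value plumbing deserves one worked note: UPROBE_HANDLER_REMOVE is a flag bit and 0 means "keep the breakpoint", so the dispatcher can merge the per-subsystem verdicts with a bitwise OR; as the changelog says, this is safe only while TP_FLAG_TRACE and TP_FLAG_PROFILE stay mutually exclusive. Condensed from the diff below:

	int ret = 0;

	if (tu->flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs);	/* always returns 0 */

	if (tu->flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs);	/* 0 or UPROBE_HANDLER_REMOVE */

	return ret;	/* REMOVE makes the core unapply the bp for current->mm */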
Signed-off-by: Oleg Nesterov --- kernel/trace/trace_uprobe.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index b7850f535acf..2399f1416555 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -486,7 +486,7 @@ static const struct file_operations uprobe_profile_ops = { }; /* uprobe handler */ -static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) +static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) { struct uprobe_trace_entry_head *entry; struct ring_buffer_event *event; @@ -504,7 +504,7 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) event = trace_current_buffer_lock_reserve(&buffer, call->event.type, size, irq_flags, pc); if (!event) - return; + return 0; entry = ring_buffer_event_data(event); entry->ip = instruction_pointer(task_pt_regs(current)); @@ -514,6 +514,8 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) if (!filter_current_check_discard(buffer, call, entry, event)) trace_buffer_unlock_commit(buffer, event, irq_flags, pc); + + return 0; } /* Event entry printers */ @@ -721,7 +723,7 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, } /* uprobe profile handler */ -static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) +static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) { struct ftrace_event_call *call = &tu->call; struct uprobe_trace_entry_head *entry; @@ -730,11 +732,14 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) int size, __size, i; int rctx; + if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) + return UPROBE_HANDLER_REMOVE; + __size = sizeof(*entry) + tu->size; size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) - return; + return 0; preempt_disable(); @@ -752,6 +757,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) out: preempt_enable(); + return 0; } #endif /* CONFIG_PERF_EVENTS */ @@ -792,18 +798,19 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) { struct trace_uprobe *tu; + int ret = 0; tu = container_of(con, struct trace_uprobe, consumer); tu->nhit++; if (tu->flags & TP_FLAG_TRACE) - uprobe_trace_func(tu, regs); + ret |= uprobe_trace_func(tu, regs); #ifdef CONFIG_PERF_EVENTS if (tu->flags & TP_FLAG_PROFILE) - uprobe_perf_func(tu, regs); + ret |= uprobe_perf_func(tu, regs); #endif - return 0; + return ret; } static struct trace_event_functions uprobe_funcs = { -- cgit v1.2.3-59-g8ed1b From b2fe8ba674e8acbb9e8e63510b802c6d054d88a3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 4 Feb 2013 19:05:43 +0100 Subject: uprobes/perf: Avoid uprobe_apply() whenever possible uprobe_perf_open/close call the costly uprobe_apply() every time; we can avoid it if: - "nr_systemwide != 0" is not changed. - There is another process/thread with the same ->mm. - copy_process() does inherit_event(). dup_mmap() preserves the inserted breakpoints. - event->attr.enable_on_exec == T; we can rely on uprobe_mmap() called by exec/mmap paths. - tp_target is exiting. Only _close() checks PF_EXITING; I don't think TRACE_REG_PERF_OPEN can hit the dying task too often.
Signed-off-by: Oleg Nesterov --- kernel/trace/trace_uprobe.c | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2399f1416555..8dad2a92dee9 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -680,30 +680,60 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) return false; } +static inline bool +uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) +{ + return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm); +} + static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) { + bool done; + write_lock(&tu->filter.rwlock); - if (event->hw.tp_target) + if (event->hw.tp_target) { + /* + * event->parent != NULL means copy_process(), we can avoid + * uprobe_apply(). current->mm must be probed and we can rely + * on dup_mmap() which preserves the already installed bp's. + * + * attr.enable_on_exec means that exec/mmap will install the + * breakpoints we need. + */ + done = tu->filter.nr_systemwide || + event->parent || event->attr.enable_on_exec || + uprobe_filter_event(tu, event); list_add(&event->hw.tp_list, &tu->filter.perf_events); - else + } else { + done = tu->filter.nr_systemwide; tu->filter.nr_systemwide++; + } write_unlock(&tu->filter.rwlock); - uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); + if (!done) + uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); return 0; } static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) { + bool done; + write_lock(&tu->filter.rwlock); - if (event->hw.tp_target) + if (event->hw.tp_target) { list_del(&event->hw.tp_list); + done = tu->filter.nr_systemwide || + (event->hw.tp_target->flags & PF_EXITING) || + uprobe_filter_event(tu, event); + } else { + tu->filter.nr_systemwide--; + done = tu->filter.nr_systemwide; + } write_unlock(&tu->filter.rwlock); - uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); + if (!done) + uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); return 0; } -- cgit v1.2.3-59-g8ed1b From eeb49845425375481f14c0e5721f88242642e88e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:11 +0900 Subject: perf buildid-cache: Add --update option When adding a vmlinux file to the build-id cache, it would fail since a kallsyms dso with the same build-id was already added by perf record. So one needs to remove the kallsyms first to add vmlinux into the cache. Add a --update option for doing it in one step. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-5-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-buildid-cache.txt | 4 ++ tools/perf/builtin-buildid-cache.c | 50 ++++++++++++++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt index 8e798baae0fd..e9a8349a7172 100644 --- a/tools/perf/Documentation/perf-buildid-cache.txt +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -27,6 +27,10 @@ OPTIONS -M:: --missing=:: List missing build ids in the cache for the specified file. +-u:: +--update:: + Update specified file of the cache. It can be used to update kallsyms + kernel dso to vmlinux in order to support annotation.
-v:: --verbose:: Be more verbose. diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index a336014e0286..c96c8fa38243 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -93,6 +93,32 @@ static int build_id_cache__fprintf_missing(const char *filename, bool force, FIL return 0; } +static int build_id_cache__update_file(const char *filename, + const char *debugdir) +{ + u8 build_id[BUILD_ID_SIZE]; + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + int err; + + if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) { + pr_debug("Couldn't read a build-id in %s\n", filename); + return -1; + } + + build_id__sprintf(build_id, sizeof(build_id), sbuild_id); + err = build_id_cache__remove_s(sbuild_id, debugdir); + if (!err) { + err = build_id_cache__add_s(sbuild_id, debugdir, filename, + false, false); + } + if (verbose) + pr_info("Updating %s %s: %s\n", sbuild_id, filename, + err ? "FAIL" : "Ok"); + + return err; +} + int cmd_buildid_cache(int argc, const char **argv, const char *prefix __maybe_unused) { @@ -103,7 +129,9 @@ int cmd_buildid_cache(int argc, const char **argv, char debugdir[PATH_MAX]; char const *add_name_list_str = NULL, *remove_name_list_str = NULL, - *missing_filename = NULL; + *missing_filename = NULL, + *update_name_list_str = NULL; + const struct option buildid_cache_options[] = { OPT_STRING('a', "add", &add_name_list_str, "file list", "file(s) to add"), @@ -112,6 +140,8 @@ int cmd_buildid_cache(int argc, const char **argv, OPT_STRING('M', "missing", &missing_filename, "file", "to find missing build ids in the cache"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), + OPT_STRING('u', "update", &update_name_list_str, "file list", + "file(s) to update"), OPT_INCR('v', "verbose", &verbose, "be more verbose"), OPT_END() }; @@ -169,5 +199,23 @@ int cmd_buildid_cache(int argc, const char **argv, if (missing_filename) ret = build_id_cache__fprintf_missing(missing_filename, force, stdout); + if (update_name_list_str) { + list = strlist__new(true, update_name_list_str); + if (list) { + strlist__for_each(pos, list) + if (build_id_cache__update_file(pos->s, debugdir)) { + if (errno == ENOENT) { + pr_debug("%s wasn't in the cache\n", + pos->s); + continue; + } + pr_warning("Couldn't update %s: %s\n", + pos->s, strerror(errno)); + } + + strlist__delete(list); + } + } + return ret; } -- cgit v1.2.3-59-g8ed1b From e3a34029c635b7dee06e51d99441578b96c7d463 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:12 +0900 Subject: perf annotate: Fix warning message on a missing vmlinux When perf annotate runs with no vmlinux file it cannot annotate kernel symbols because the kallsyms only provides symbol addresses. So it recommends running perf buildid-cache to install a proper vmlinux image. But running perf buildid-cache -av vmlinux as the message suggests gives me the following error: $ perf buildid-cache -av /home/namhyung/build/kernel/vmlinux Couldn't add v: No such file or directory Since the -a option receives a parameter, 'v' should not be after the option. In addition, the -a option does not work for this case since the build-id cache already has a kallsyms entry with the same build-id, so it will fail with EEXIST. Use the recently added -u (--update) option for it.
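As a usage example in the style of the testing sessions earlier in this series (the vmlinux path is illustrative), once a system-wide record has populated the cache with the kallsyms dso:

	# perf buildid-cache -vu /path/to/vmlinux

This reads the build-id from vmlinux, removes the cached kallsyms entry carrying that build-id, and adds vmlinux in its place, so a later perf annotate can work from real debug info; it is exactly the workflow the fixed warning message below points users at.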
Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-6-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 07aaeea60000..d33fe937e6f1 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -809,7 +809,7 @@ fallback: pr_err("Can't annotate %s:\n\n" "No vmlinux file%s\nwas found in the path.\n\n" "Please use:\n\n" - " perf buildid-cache -av vmlinux\n\n" + " perf buildid-cache -vu vmlinux\n\n" "or:\n\n" " --vmlinux vmlinux\n", sym->name, build_id_msg ?: ""); -- cgit v1.2.3-59-g8ed1b From 2b676bf068916046151277f27113f80828e33001 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:08 +0900 Subject: perf ui/gtk: Implement basic GTK2 annotation browser Basic implementation of perf annotate on GTK2. Currently only shows first symbol. Add a new --gtk option to use it. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-annotate.txt | 4 +- tools/perf/Makefile | 1 + tools/perf/builtin-annotate.c | 10 +- tools/perf/ui/gtk/annotate.c | 185 +++++++++++++++++++++++++++++ tools/perf/ui/setup.c | 2 +- tools/perf/util/annotate.h | 20 ++++ 6 files changed, 218 insertions(+), 4 deletions(-) create mode 100644 tools/perf/ui/gtk/annotate.c diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index c8ffd9fd5c6a..e5e1d06efc37 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -61,11 +61,13 @@ OPTIONS --stdio:: Use the stdio interface. ---tui:: Use the TUI interface Use of --tui requires a tty, if one is not +--tui:: Use the TUI interface. Use of --tui requires a tty, if one is not present, as when piping to other commands, the stdio interface is used. This interfaces starts by centering on the line with more samples, TAB/UNTAB cycles through the lines with more samples. +--gtk:: Use the GTK interface. + -C:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. 
Ranges of diff --git a/tools/perf/Makefile b/tools/perf/Makefile index a158309a65ef..1c8df6f9c4d9 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -698,6 +698,7 @@ ifndef NO_GTK2 LIB_OBJS += $(OUTPUT)ui/gtk/util.o LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o LIB_OBJS += $(OUTPUT)ui/gtk/progress.o + LIB_OBJS += $(OUTPUT)ui/gtk/annotate.o endif endif diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 95a2ad3f043e..9d758c9611a5 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -34,7 +34,7 @@ struct perf_annotate { struct perf_tool tool; - bool force, use_tui, use_stdio; + bool force, use_tui, use_stdio, use_gtk; bool full_paths; bool print_line; const char *sym_hist_filter; @@ -138,7 +138,10 @@ find_next: continue; } - if (use_browser > 0) { + if (use_browser == 2) { + hist_entry__gtk_annotate(he, evidx, NULL); + return; + } else if (use_browser == 1) { key = hist_entry__tui_annotate(he, evidx, NULL); switch (key) { case K_RIGHT: @@ -270,6 +273,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), + OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"), OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"), OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, @@ -300,6 +304,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) use_browser = 0; else if (annotate.use_tui) use_browser = 1; + else if (annotate.use_gtk) + use_browser = 2; setup_browser(true); diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c new file mode 100644 index 000000000000..19d84fa327af --- /dev/null +++ b/tools/perf/ui/gtk/annotate.c @@ -0,0 +1,185 @@ +#include "gtk.h" +#include "util/debug.h" +#include "util/annotate.h" +#include "ui/helpline.h" + + +enum { + ANN_COL__PERCENT, + ANN_COL__OFFSET, + ANN_COL__LINE, + + MAX_ANN_COLS +}; + +static const char *const col_names[] = { + "Overhead", + "Offset", + "Line" +}; + +static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym, + struct disasm_line *dl, int evidx) +{ + struct sym_hist *symhist; + double percent = 0.0; + const char *markup; + int ret = 0; + + strcpy(buf, ""); + + if (dl->offset == (s64) -1) + return 0; + + symhist = annotation__histogram(symbol__annotation(sym), evidx); + if (!symhist->addr[dl->offset]) + return 0; + + percent = 100.0 * symhist->addr[dl->offset] / symhist->sum; + + markup = perf_gtk__get_percent_color(percent); + if (markup) + ret += scnprintf(buf, size, "%s", markup); + ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent); + if (markup) + ret += scnprintf(buf + ret, size - ret, ""); + + return ret; +} + +static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym, + struct map *map, struct disasm_line *dl) +{ + u64 start = map__rip_2objdump(map, sym->start); + + strcpy(buf, ""); + + if (dl->offset == (s64) -1) + return 0; + + return scnprintf(buf, size, "%"PRIx64, start + dl->offset); +} + +static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, + struct map *map, int evidx, + struct hist_browser_timer *hbt __maybe_unused) +{ + struct disasm_line *pos, *n; + struct annotation *notes; + GType col_types[MAX_ANN_COLS]; + GtkCellRenderer *renderer; + GtkListStore *store; + GtkWidget *view; + int i; + char s[512]; + 
+ if (map->dso->annotate_warned) + return -1; + + if (symbol__annotate(sym, map, 0) < 0) { + ui__error("%s", ui_helpline__current); + return -1; + } + + notes = symbol__annotation(sym); + + for (i = 0; i < MAX_ANN_COLS; i++) { + col_types[i] = G_TYPE_STRING; + } + store = gtk_list_store_newv(MAX_ANN_COLS, col_types); + + view = gtk_tree_view_new(); + renderer = gtk_cell_renderer_text_new(); + + for (i = 0; i < MAX_ANN_COLS; i++) { + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), + -1, col_names[i], renderer, + i == ANN_COL__PERCENT ? "markup" : "text", + i, NULL); + } + + gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); + g_object_unref(GTK_TREE_MODEL(store)); + + list_for_each_entry(pos, ¬es->src->source, node) { + GtkTreeIter iter; + + gtk_list_store_append(store, &iter); + + if (perf_gtk__get_percent(s, sizeof(s), sym, pos, evidx)) + gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1); + if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos)) + gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1); + gtk_list_store_set(store, &iter, ANN_COL__LINE, pos->line, -1); + } + + gtk_container_add(GTK_CONTAINER(window), view); + + list_for_each_entry_safe(pos, n, ¬es->src->source, node) { + list_del(&pos->node); + disasm_line__free(pos); + } + + return 0; +} + +int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx, + struct hist_browser_timer *hbt) +{ + GtkWidget *vbox; + GtkWidget *notebook; + GtkWidget *infobar; + GtkWidget *statbar; + GtkWidget *window; + GtkWidget *scrolled_window; + GtkWidget *tab_label; + + signal(SIGSEGV, perf_gtk__signal); + signal(SIGFPE, perf_gtk__signal); + signal(SIGINT, perf_gtk__signal); + signal(SIGQUIT, perf_gtk__signal); + signal(SIGTERM, perf_gtk__signal); + + window = gtk_window_new(GTK_WINDOW_TOPLEVEL); + gtk_window_set_title(GTK_WINDOW(window), "perf annotate"); + + g_signal_connect(window, "delete_event", gtk_main_quit, NULL); + + pgctx = perf_gtk__activate_context(window); + if (!pgctx) + return -1; + + vbox = gtk_vbox_new(FALSE, 0); + notebook = gtk_notebook_new(); + scrolled_window = gtk_scrolled_window_new(NULL, NULL); + tab_label = gtk_label_new(sym->name); + + gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), + GTK_POLICY_AUTOMATIC, + GTK_POLICY_AUTOMATIC); + + gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, + tab_label); + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); + + infobar = perf_gtk__setup_info_bar(); + if (infobar) + gtk_box_pack_start(GTK_BOX(vbox), infobar, FALSE, FALSE, 0); + + statbar = perf_gtk__setup_statusbar(); + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + + gtk_container_add(GTK_CONTAINER(window), vbox); + + perf_gtk__annotate_symbol(scrolled_window, sym, map, evidx, hbt); + + gtk_widget_show_all(window); + + perf_gtk__resize_window(window); + gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); + + gtk_main(); + + perf_gtk__deactivate_context(&pgctx); + return 0; +} diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c index 166f13df3134..ae6a789cb0f6 100644 --- a/tools/perf/ui/setup.c +++ b/tools/perf/ui/setup.c @@ -8,7 +8,7 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; void setup_browser(bool fallback_to_pager) { - if (!isatty(1) || dump_trace) + if (use_browser < 2 && (!isatty(1) || dump_trace)) use_browser = 0; /* default to TUI */ diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 8eec94358a4a..a8ccbda4aeb7 100644 --- 
a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -6,6 +6,7 @@ #include "types.h" #include "symbol.h" #include "hist.h" +#include "sort.h" #include #include #include @@ -154,6 +155,25 @@ static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, } #endif +#ifdef GTK2_SUPPORT +int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx, + struct hist_browser_timer *hbt); + +static inline int hist_entry__gtk_annotate(struct hist_entry *he, int evidx, + struct hist_browser_timer *hbt) +{ + return symbol__gtk_annotate(he->ms.sym, he->ms.map, evidx, hbt); +} +#else +static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused, + int evidx __maybe_unused, + struct hist_browser_timer *hbt + __maybe_unused) +{ + return 0; +} +#endif + extern const char *disassembler_style; #endif /* __PERF_ANNOTATE_H */ -- cgit v1.2.3-59-g8ed1b From 7a60ba948267336d77a48a3539f98151f9dcfba6 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:09 +0900 Subject: perf gtk/annotate: Support multiple event annotation Show multiple annotation results for each evsel. Each result represents the most frequently sampled symbol/function for the evsel and it will be shown in a tab window. For this, add a reference to the main container (notebook) to the pgctx. At the first call to the annotate browser, hist_entry__find_annotations() will set up a new browser, and next calls will add new tabs to the browser. But it requires a final call to perf_gtk__show_annotations() to start processing GUI events. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 4 +++ tools/perf/ui/gtk/annotate.c | 74 +++++++++++++++++++++++++++---------------- tools/perf/ui/gtk/gtk.h | 1 + tools/perf/util/annotate.h | 4 +++ 4 files changed, 56 insertions(+), 27 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 9d758c9611a5..68e3a16abd3d 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -227,6 +227,10 @@ static int __cmd_annotate(struct perf_annotate *ann) ui__error("The %s file has no samples!\n", session->filename); goto out_delete; } + + if (use_browser == 2) + perf_gtk__show_annotations(); + out_delete: /* * Speed up the exit process, for large files this can diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index 19d84fa327af..1ce89f2558fa 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -126,31 +126,52 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx, struct hist_browser_timer *hbt) { - GtkWidget *vbox; - GtkWidget *notebook; - GtkWidget *infobar; - GtkWidget *statbar; GtkWidget *window; + GtkWidget *notebook; GtkWidget *scrolled_window; GtkWidget *tab_label; - signal(SIGSEGV, perf_gtk__signal); - signal(SIGFPE, perf_gtk__signal); - signal(SIGINT, perf_gtk__signal); - signal(SIGQUIT, perf_gtk__signal); - signal(SIGTERM, perf_gtk__signal); + if (perf_gtk__is_active_context(pgctx)) { + window = pgctx->main_window; + notebook = pgctx->notebook; + } else { + GtkWidget *vbox; + GtkWidget *infobar; + GtkWidget *statbar; - window = gtk_window_new(GTK_WINDOW_TOPLEVEL); - gtk_window_set_title(GTK_WINDOW(window), "perf
annotate"); + signal(SIGSEGV, perf_gtk__signal); + signal(SIGFPE, perf_gtk__signal); + signal(SIGINT, perf_gtk__signal); + signal(SIGQUIT, perf_gtk__signal); + signal(SIGTERM, perf_gtk__signal); - g_signal_connect(window, "delete_event", gtk_main_quit, NULL); + window = gtk_window_new(GTK_WINDOW_TOPLEVEL); + gtk_window_set_title(GTK_WINDOW(window), "perf annotate"); - pgctx = perf_gtk__activate_context(window); - if (!pgctx) - return -1; + g_signal_connect(window, "delete_event", gtk_main_quit, NULL); + + pgctx = perf_gtk__activate_context(window); + if (!pgctx) + return -1; + + vbox = gtk_vbox_new(FALSE, 0); + notebook = gtk_notebook_new(); + pgctx->notebook = notebook; + + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); + + infobar = perf_gtk__setup_info_bar(); + if (infobar) { + gtk_box_pack_start(GTK_BOX(vbox), infobar, + FALSE, FALSE, 0); + } + + statbar = perf_gtk__setup_statusbar(); + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + + gtk_container_add(GTK_CONTAINER(window), vbox); + } - vbox = gtk_vbox_new(FALSE, 0); - notebook = gtk_notebook_new(); scrolled_window = gtk_scrolled_window_new(NULL, NULL); tab_label = gtk_label_new(sym->name); @@ -160,19 +181,19 @@ int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx, gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); - gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); - - infobar = perf_gtk__setup_info_bar(); - if (infobar) - gtk_box_pack_start(GTK_BOX(vbox), infobar, FALSE, FALSE, 0); - statbar = perf_gtk__setup_statusbar(); - gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + perf_gtk__annotate_symbol(scrolled_window, sym, map, evidx, hbt); + return 0; +} - gtk_container_add(GTK_CONTAINER(window), vbox); +void perf_gtk__show_annotations(void) +{ + GtkWidget *window; - perf_gtk__annotate_symbol(scrolled_window, sym, map, evidx, hbt); + if (!perf_gtk__is_active_context(pgctx)) + return; + window = pgctx->main_window; gtk_widget_show_all(window); perf_gtk__resize_window(window); @@ -181,5 +202,4 @@ int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx, gtk_main(); perf_gtk__deactivate_context(&pgctx); - return 0; } diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h index 5d3693754828..3d96785ef155 100644 --- a/tools/perf/ui/gtk/gtk.h +++ b/tools/perf/ui/gtk/gtk.h @@ -10,6 +10,7 @@ struct perf_gtk_context { GtkWidget *main_window; + GtkWidget *notebook; #ifdef HAVE_GTK_INFO_BAR GtkWidget *info_bar; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index a8ccbda4aeb7..c422440fe611 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -164,6 +164,8 @@ static inline int hist_entry__gtk_annotate(struct hist_entry *he, int evidx, { return symbol__gtk_annotate(he->ms.sym, he->ms.map, evidx, hbt); } + +void perf_gtk__show_annotations(void); #else static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused, int evidx __maybe_unused, @@ -172,6 +174,8 @@ static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused, { return 0; } + +static inline void perf_gtk__show_annotations(void) {} #endif extern const char *disassembler_style; -- cgit v1.2.3-59-g8ed1b From 237522378604a2e26e19a8b11a70171eaf98c6c5 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:10 +0900 Subject: perf gtk/annotate: Show source lines with gray color In order to differentiate source lines from asm line, print them with gray color. 
To do this, the text needs to be escaped, since it sometimes contains "<" and/or ">" characters that should not be treated as markup tags. Use glib's g_markup_escape_text() for this. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-4-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/annotate.c | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index 1ce89f2558fa..2fe056b0096c 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -60,6 +60,30 @@ static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym, return scnprintf(buf, size, "%"PRIx64, start + dl->offset); } +static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl) +{ + int ret = 0; + char *line = g_markup_escape_text(dl->line, -1); + const char *markup = ""; + + strcpy(buf, ""); + + if (!line) + return 0; + + if (dl->offset != (s64) -1) + markup = NULL; + + if (markup) + ret += scnprintf(buf, size, "%s", markup); + ret += scnprintf(buf + ret, size - ret, "%s", line); + if (markup) + ret += scnprintf(buf + ret, size - ret, ""); + + g_free(line); + return ret; +} + static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, struct map *map, int evidx, struct hist_browser_timer *hbt __maybe_unused) @@ -93,8 +117,7 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, for (i = 0; i < MAX_ANN_COLS; i++) { gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, col_names[i], renderer, - i == ANN_COL__PERCENT ? "markup" : "text", + -1, col_names[i], renderer, "markup", i, NULL); } @@ -110,7 +133,8 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1); if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos)) gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1); - gtk_list_store_set(store, &iter, ANN_COL__LINE, pos->line, -1); + if (perf_gtk__get_line(s, sizeof(s), pos)) + gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1); } gtk_container_add(GTK_CONTAINER(window), view); -- cgit v1.2.3-59-g8ed1b From c0e79be74907b4654b622601692e1a27fd1dbeb3 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:13 +0900 Subject: perf gtk/annotate: Fail early if it can't annotate There is no need to set up a browser window if annotation cannot work.
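The shape of that fix, condensed from the diff that follows (the two checks are verbatim from the patch; the trailing comment marks where window setup begins):

	if (map->dso->annotate_warned)
		return -1;

	if (symbol__annotate(sym, map, 0) < 0) {
		ui__error("%s", ui_helpline__current);
		return -1;	/* bail out before any GTK widget exists */
	}

	/* only from here on is the window/notebook/tab machinery touched */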
Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-7-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/gtk/annotate.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index 2fe056b0096c..7d8dc581a545 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -97,14 +97,6 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, int i; char s[512]; - if (map->dso->annotate_warned) - return -1; - - if (symbol__annotate(sym, map, 0) < 0) { - ui__error("%s", ui_helpline__current); - return -1; - } - notes = symbol__annotation(sym); @@ -155,6 +147,14 @@ int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx, GtkWidget *scrolled_window; GtkWidget *tab_label; + if (map->dso->annotate_warned) + return -1; + + if (symbol__annotate(sym, map, 0) < 0) { + ui__error("%s", ui_helpline__current); + return -1; + } + if (perf_gtk__is_active_context(pgctx)) { window = pgctx->main_window; notebook = pgctx->notebook; -- cgit v1.2.3-59-g8ed1b From 18c9e5c567e1bc475edc67dca3680ecd2562dc5c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 7 Feb 2013 18:02:14 +0900 Subject: perf annotate: Make it to be able to skip unannotatable symbols Add a --skip-missing option for skipping symbols that cannot be used for annotation. This is the case for kernel symbols when the user doesn't have a vmlinux image file. Signed-off-by: Namhyung Kim Cc: Andi Kleen Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360227734-375-8-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-annotate.txt | 3 +++ tools/perf/builtin-annotate.c | 17 +++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index e5e1d06efc37..5ad07ef417f0 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -90,6 +90,9 @@ OPTIONS --objdump=:: Path to objdump binary. +--skip-missing:: + Skip symbols that cannot be annotated.
+ SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 68e3a16abd3d..2e6961ea3184 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,6 +37,7 @@ struct perf_annotate { bool force, use_tui, use_stdio, use_gtk; bool full_paths; bool print_line; + bool skip_missing; const char *sym_hist_filter; const char *cpu_list; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -139,11 +140,21 @@ find_next: } if (use_browser == 2) { - hist_entry__gtk_annotate(he, evidx, NULL); - return; + int ret; + + ret = hist_entry__gtk_annotate(he, evidx, NULL); + if (!ret || !ann->skip_missing) + return; + + /* skip missing symbols */ + nd = rb_next(nd); } else if (use_browser == 1) { key = hist_entry__tui_annotate(he, evidx, NULL); switch (key) { + case -1: + if (!ann->skip_missing) + return; + /* fall through */ case K_RIGHT: next = rb_next(nd); break; @@ -288,6 +299,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "print matching source lines (may be slow)"), OPT_BOOLEAN('P', "full-paths", &annotate.full_paths, "Don't shorten the displayed pathnames"), + OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, + "Skip symbols that cannot be annotated"), OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), -- cgit v1.2.3-59-g8ed1b From 02320931ce000836aa231ac1311769e303122099 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 14 Feb 2013 15:59:33 -0300 Subject: perf tools: Limit unwind support to x86 archs There's DWARF unwind support only for x86 archs, so limit the unwind.o object to them only. Without this, building for other archs (e.g. cross-compiling for ARM) is broken. Signed-off-by: Jiri Olsa Signed-off-by: Dirk Behme Link: http://lkml.kernel.org/n/tip-viqtvd6hppqgt68zz4wlqm20@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 1c8df6f9c4d9..b90528d42b03 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -581,6 +581,11 @@ else endif # SOURCE_LIBELF endif # NO_LIBELF +# There's only x86 (both 32 and 64) support for CFI unwind so far +ifneq ($(ARCH),x86) + NO_LIBUNWIND := 1 +endif + ifndef NO_LIBUNWIND # for linking with debug library, run like: # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/ -- cgit v1.2.3-59-g8ed1b From 85df3b3769222894e9692b383c7af124b7721086 Mon Sep 17 00:00:00 2001 From: Vinson Lee Date: Wed, 13 Feb 2013 13:48:58 -0800 Subject: perf tools: Fix build with bison 2.3 and older. The %name-prefix "prefix" syntax is not available on bison 2.3 and older. Substitute with the -p "prefix" command-line option for compatibility with older versions of bison. This patch fixes this build error with older versions of bison.
CC util/sysfs.o BISON util/pmu-bison.c util/pmu.y:2.14-24: syntax error, unexpected string, expecting = make: *** [util/pmu-bison.c] Error 1 Signed-off-by: Vinson Lee Tested-by: Li Zefan Cc: stable@vger.kernel.org # 3.4+ Cc: Ingo Molnar Cc: Jiri Olsa Cc: Li Zefan Cc: Namhyung Kim Cc: Paul Mackerras Cc: Pekka Enberg Link: http://lkml.kernel.org/r/1360792138-29186-1-git-send-email-vlee@twitter.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 4 ++-- tools/perf/util/parse-events.y | 1 - tools/perf/util/pmu.y | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b90528d42b03..a2108ca1cc17 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -296,13 +296,13 @@ $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-event $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c: util/parse-events.y - $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c + $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_ $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c: util/pmu.y - $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c + $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_ $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 4de2fdca98c8..afc44c18dfe1 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -1,5 +1,4 @@ %pure-parser -%name-prefix "parse_events_" %parse-param {void *_data} %parse-param {void *scanner} %lex-param {void* scanner} diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index ec898047ebb9..bfd7e8509869 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -1,5 +1,4 @@ -%name-prefix "perf_pmu_" %parse-param {struct list_head *format} %parse-param {char *name} -- cgit v1.2.3-59-g8ed1b From 02e176af92f3e2e9ec3a48792036566af2dcd534 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Wed, 6 Feb 2013 23:29:20 +0200 Subject: perf/hwbp: Fix cleanup in case of kzalloc failure Obviously this is a typo and could result in memory leaks if kzalloc fails on a given cpu. 
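The fix below is the standard partial-allocation rollback for per-cpu data, worth spelling out since it is easy to get wrong. A condensed sketch, with ptr standing in for the real nr_task_bp_pinned[i] arrays:

	for_each_possible_cpu(cpu) {
		p = kzalloc(size, GFP_KERNEL);
		if (!p)
			goto err_alloc;
		per_cpu(ptr, cpu) = p;
	}
	return 0;

err_alloc:
	for_each_possible_cpu(err_cpu) {
		kfree(per_cpu(ptr, err_cpu));	/* must index by err_cpu ... */
		if (err_cpu == cpu)		/* ... stopping at the cpu that failed */
			break;
	}
	return -ENOMEM;

The bug was using 'cpu' (the CPU whose allocation failed) as the kfree() index instead of 'err_cpu', freeing the same slot on every iteration and leaking the rest.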
Signed-off-by: Daniel Baluta Acked-by: Frederic Weisbecker Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360186160-7566-1-git-send-email-dbaluta@ixiacom.com Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index fe8a916507ed..a64f8aeb5c1f 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -676,7 +676,7 @@ int __init init_hw_breakpoint(void) err_alloc: for_each_possible_cpu(err_cpu) { for (i = 0; i < TYPE_MAX; i++) - kfree(per_cpu(nr_task_bp_pinned[i], cpu)); + kfree(per_cpu(nr_task_bp_pinned[i], err_cpu)); if (err_cpu == cpu) break; } -- cgit v1.2.3-59-g8ed1b From e259514eef764a5286873618e34c560ecb6cff13 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Wed, 6 Feb 2013 11:26:29 -0600 Subject: perf/x86/amd: Enable northbridge performance counters on AMD family 15h On AMD family 15h processors, there are 4 new performance counters (in addition to 6 core performance counters) that can be used for counting northbridge events (i.e. DRAM accesses). Their bit fields are almost identical to the core performance counters. However, unlike the core performance counters, these MSRs are shared between multiple cores (that share the same northbridge). We will reuse the same code path as existing family 10h northbridge event constraints handler logic to enforce this sharing. Signed-off-by: Jacob Shin Acked-by: Stephane Eranian Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Jacob Shin Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1360171589-6381-7-git-send-email-jacob.shin@amd.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeature.h | 2 + arch/x86/include/asm/perf_event.h | 9 ++ arch/x86/include/uapi/asm/msr-index.h | 2 + arch/x86/kernel/cpu/perf_event_amd.c | 171 ++++++++++++++++++++++++++++++---- 4 files changed, 164 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 2d9075e863a0..93fe929d1cee 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -167,6 +167,7 @@ #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) +#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 2234eaaecb52..57cb63402213 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -29,9 +29,14 @@ #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL +#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36) #define AMD64_EVENTSEL_GUESTONLY (1ULL << 
40)
 #define AMD64_EVENTSEL_HOSTONLY			(1ULL << 41)
 
+#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT	37
+#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
+	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
+
 #define AMD64_EVENTSEL_EVENT	\
 	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
 #define INTEL_ARCH_EVENT_MASK	\
@@ -46,8 +51,12 @@
 #define AMD64_RAW_EVENT_MASK		\
 	(X86_RAW_EVENT_MASK          |  \
 	 AMD64_EVENTSEL_EVENT)
+#define AMD64_RAW_EVENT_MASK_NB		\
+	(AMD64_EVENTSEL_EVENT        |  \
+	 ARCH_PERFMON_EVENTSEL_UMASK)
 #define AMD64_NUM_COUNTERS				4
 #define AMD64_NUM_COUNTERS_CORE			6
+#define AMD64_NUM_COUNTERS_NB				4
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 433a59fb1a74..075a40255591 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -194,6 +194,8 @@
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL		0xc0010200
 #define MSR_F15H_PERF_CTR		0xc0010201
+#define MSR_F15H_NB_PERF_CTL		0xc0010240
+#define MSR_F15H_NB_PERF_CTR		0xc0010241
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 05462f0432d5..dfdab42aed27 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,11 +132,14 @@ static u64 amd_pmu_event_map(int hw_event)
 	return amd_perfmon_event_map[hw_event];
 }
 
+static struct event_constraint *amd_nb_event_constraint;
+
 /*
  * Previously calculated offsets
  */
 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
 
 /*
  * Legacy CPUs:
@@ -144,10 +147,14 @@ static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
  *
  * CPUs with core performance counter extensions:
  *   6 counters starting at 0xc0010200 each offset by 2
+ *
+ * CPUs with north bridge performance counter extensions:
+ *   4 additional counters starting at 0xc0010240 each offset by 2
+ *   (indexed right above either one of the above core counters)
  */
 static inline int amd_pmu_addr_offset(int index, bool eventsel)
 {
-	int offset;
+	int offset, first, base;
 
 	if (!index)
 		return index;
@@ -160,7 +167,23 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
 	if (offset)
 		return offset;
 
-	if (!cpu_has_perfctr_core)
+	if (amd_nb_event_constraint &&
+	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
+		/*
+		 * calculate the offset of NB counters with respect to
+		 * base eventsel or perfctr
+		 */
+
+		first = find_first_bit(amd_nb_event_constraint->idxmsk,
+				       X86_PMC_IDX_MAX);
+
+		if (eventsel)
+			base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
+		else
+			base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
+
+		offset = base + ((index - first) << 1);
+	} else if (!cpu_has_perfctr_core)
 		offset = index;
 	else
 		offset = index << 1;
@@ -175,24 +198,36 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
 
 static inline int amd_pmu_rdpmc_index(int index)
 {
-	return index;
-}
+	int ret, first;
 
-static int amd_pmu_hw_config(struct perf_event *event)
-{
-	int ret;
+	if (!index)
+		return index;
 
-	/* pass precise event sampling to ibs: */
-	if (event->attr.precise_ip && get_ibs_caps())
-		return -ENOENT;
+	ret = rdpmc_indexes[index];
 
-	ret = x86_pmu_hw_config(event);
 	if (ret)
 		return ret;
 
-	if (has_branch_stack(event))
-		return -EOPNOTSUPP;
+	if (amd_nb_event_constraint &&
+	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
+		/*
+		 * according to the manual, ECX value of the NB counters is
+		 * the index of the NB counter (0, 1, 2 or 3) plus 6
+		 */
+
+		first = find_first_bit(amd_nb_event_constraint->idxmsk,
+				       X86_PMC_IDX_MAX);
+		ret = index - first + 6;
+	} else
+		ret = index;
+
+	rdpmc_indexes[index] = ret;
+	return ret;
+}
+
+static int amd_core_hw_config(struct perf_event *event)
+{
 	if (event->attr.exclude_host && event->attr.exclude_guest)
 		/*
 		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -206,10 +241,33 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	else if (event->attr.exclude_guest)
 		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
 
-	if (event->attr.type != PERF_TYPE_RAW)
-		return 0;
+	return 0;
+}
 
-	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+/*
+ * NB counters do not support the following event select bits:
+ *   Host/Guest only
+ *   Counter mask
+ *   Invert counter mask
+ *   Edge detect
+ *   OS/User mode
+ */
+static int amd_nb_hw_config(struct perf_event *event)
+{
+	/* for NB, we only allow system wide counting mode */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EINVAL;
+
+	if (event->attr.exclude_user || event->attr.exclude_kernel ||
+	    event->attr.exclude_host || event->attr.exclude_guest)
+		return -EINVAL;
+
+	event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+			      ARCH_PERFMON_EVENTSEL_OS);
+
+	if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
+				 ARCH_PERFMON_EVENTSEL_INT))
+		return -EINVAL;
 
 	return 0;
 }
@@ -227,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
+{
+	return amd_nb_event_constraint && amd_is_nb_event(hwc);
+}
+
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
 	struct amd_nb *nb = cpuc->amd_nb;
@@ -234,6 +297,30 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 	return nb && nb->nb_id != -1;
 }
 
+static int amd_pmu_hw_config(struct perf_event *event)
+{
+	int ret;
+
+	/* pass precise event sampling to ibs: */
+	if (event->attr.precise_ip && get_ibs_caps())
+		return -ENOENT;
+
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	ret = x86_pmu_hw_config(event);
+	if (ret)
+		return ret;
+
+	if (event->attr.type == PERF_TYPE_RAW)
+		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+	if (amd_is_perfctr_nb_event(&event->hw))
+		return amd_nb_hw_config(event);
+
+	return amd_core_hw_config(event);
+}
+
 static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 					   struct perf_event *event)
 {
@@ -254,6 +341,19 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 	}
 }
 
+static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
+{
+	int core_id = cpu_data(smp_processor_id()).cpu_core_id;
+
+	/* deliver interrupts only to this core */
+	if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
+		hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
+		hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
+		hwc->config |= (u64)(core_id) <<
+			AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
+	}
+}
+
 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
@@ -299,6 +399,12 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	struct perf_event *old;
 	int idx, new = -1;
 
+	if (!c)
+		c = &unconstrained;
+
+	if (cpuc->is_fake)
+		return c;
+
 	/*
 	 * detect if already present, if so reuse
 	 *
@@ -335,6 +441,9 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	if (new == -1)
 		return &emptyconstraint;
 
+	if (amd_is_perfctr_nb_event(hwc))
+		amd_nb_interrupt_hw_config(hwc);
+
 	return &nb->event_constraints[new];
 }
 
@@ -434,7 +543,8 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
 		return &unconstrained;
 
-	return __amd_get_nb_event_constraints(cpuc, event, &unconstrained);
+	return __amd_get_nb_event_constraints(cpuc, event,
+					      amd_nb_event_constraint);
 }
 
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
@@ -533,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 
+static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
+static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
+
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -598,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
 			return &amd_f15_PMC20;
 		}
 	case AMD_EVENT_NB:
-		/* not yet implemented */
-		return &emptyconstraint;
+		return __amd_get_nb_event_constraints(cpuc, event,
+						      amd_nb_event_constraint);
 	default:
 		return &emptyconstraint;
 	}
@@ -647,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int setup_event_constraints(void)
 {
-	if (boot_cpu_data.x86 >= 0x15)
+	if (boot_cpu_data.x86 == 0x15)
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
 	return 0;
 }
@@ -677,6 +790,23 @@ static int setup_perfctr_core(void)
 	return 0;
 }
 
+static int setup_perfctr_nb(void)
+{
+	if (!cpu_has_perfctr_nb)
+		return -ENODEV;
+
+	x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
+
+	if (cpu_has_perfctr_core)
+		amd_nb_event_constraint = &amd_NBPMC96;
+	else
+		amd_nb_event_constraint = &amd_NBPMC74;
+
+	printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
+
+	return 0;
+}
+
 __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
@@ -687,6 +817,7 @@ __init int amd_pmu_init(void)
 
 	setup_event_constraints();
 	setup_perfctr_core();
+	setup_perfctr_nb();
 
 	/* Events are common for all AMDs */
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
--
cgit v1.2.3-59-g8ed1b
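
A note on the addressing scheme in the patch above, for readers tracing the
arithmetic in amd_pmu_addr_offset(): on family 15h the core counters live at
MSR_F15H_PERF_CTL/CTR (0xc0010200/01) in ctl/ctr pairs two apart, and the four
NB counters are re-based at MSR_F15H_NB_PERF_CTL/CTR (0xc0010240/41). Which
perf indices are NB counters depends on the constraint installed by
setup_perfctr_nb(): amd_NBPMC96 (mask 0x3C0, counters 6..9) when the core
extensions are present, amd_NBPMC74 (mask 0xF0, counters 4..7) otherwise. The
following stand-alone user-space sketch (not kernel code) reproduces that
arithmetic; msr_addr() is a hypothetical helper, and nb_first stands in for
the kernel's find_first_bit() over amd_nb_event_constraint->idxmsk, assumed
to be 6 here.

#include <stdio.h>

#define MSR_F15H_PERF_CTL	0xc0010200u
#define MSR_F15H_PERF_CTR	0xc0010201u
#define MSR_F15H_NB_PERF_CTL	0xc0010240u
#define MSR_F15H_NB_PERF_CTR	0xc0010241u

/*
 * Map a perf counter index to its eventsel/perfctr MSR address on a
 * Fam15h CPU with core counter extensions (hypothetical helper).
 */
static unsigned int msr_addr(int index, int eventsel, int nb_first)
{
	if (index >= nb_first)	/* NB counter: re-base at 0xc0010240/41 */
		return (eventsel ? MSR_F15H_NB_PERF_CTL : MSR_F15H_NB_PERF_CTR)
			+ (unsigned int)((index - nb_first) << 1);

	/* core counter: ctl/ctr pairs sit 2 apart from 0xc0010200/01 */
	return (eventsel ? MSR_F15H_PERF_CTL : MSR_F15H_PERF_CTR)
		+ (unsigned int)(index << 1);
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)	/* 6 core + 4 NB counters */
		printf("counter %d: ctl=%#x ctr=%#x\n", i,
		       msr_addr(i, 1, 6), msr_addr(i, 0, 6));
	return 0;
}

Counter 6 prints ctl=0xc0010240, matching the "indexed right above the core
counters" comment in the patch.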
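
The RDPMC and interrupt-routing details condense the same way. Per the
comment fixed above, the RDPMC ECX value for NB counter n is n + 6 regardless
of where the NB counters start in perf's index space, and
amd_nb_interrupt_hw_config() steers the counter's interrupt to the local core
through the INT_CORE_ENABLE/INT_CORE_SEL eventsel bits (INT_CORE_ENABLE is
defined in an earlier hunk of the header not shown here; bit 36 is assumed).
A small sketch, with rdpmc_ecx() and route_nb_irq() as hypothetical
stand-ins:

#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_INT		(1ULL << 20)
#define AMD64_EVENTSEL_INT_CORE_ENABLE		(1ULL << 36)	/* assumed */
#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT	37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

/* ECX for RDPMC: NB counter n is addressed as n + 6 (patch comment) */
static int rdpmc_ecx(int index, int nb_first)
{
	return index >= nb_first ? (index - nb_first) + 6 : index;
}

/* Steer an NB counter's interrupt to one core, as the patch does */
static uint64_t route_nb_irq(uint64_t config, int core_id)
{
	if (config & ARCH_PERFMON_EVENTSEL_INT) {
		config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
		config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;	/* 4-bit field */
		config |= (uint64_t)core_id << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
	}
	return config;
}

int main(void)
{
	/* without core extensions the NB counters start at index 4 ... */
	printf("index 5 -> ECX %d\n", rdpmc_ecx(5, 4));	/* NB counter 1 -> 7 */
	/* ... with them they start at 6, so the mapping is the identity */
	printf("index 7 -> ECX %d\n", rdpmc_ecx(7, 6));	/* NB counter 1 -> 7 */

	printf("config %#llx\n", (unsigned long long)
	       route_nb_irq(ARCH_PERFMON_EVENTSEL_INT, 3));
	return 0;
}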
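
Finally, the bit filtering in amd_nb_hw_config() can be checked in isolation.
NB events are recognized by amd_is_nb_event() as those whose event select has
(config & 0xe0) == 0xe0. The ARCH_PERFMON_EVENTSEL_* values below are the
standard ones from the kernel header; nb_config_ok() is a hypothetical
helper, not a kernel function: it strips USR/OS as the patch does, then
rejects any leftover bits outside AMD64_RAW_EVENT_MASK_NB plus the interrupt
enable.

#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR	(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_INT	(1ULL << 20)
#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define AMD64_RAW_EVENT_MASK_NB \
	(AMD64_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK)

/* hypothetical helper mirroring the amd_nb_hw_config() filtering */
static int nb_config_ok(uint64_t config)
{
	/* USR/OS are silently stripped rather than rejected */
	config &= ~(ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS);

	/* only event select, unit mask and interrupt enable may remain */
	return !(config & ~(AMD64_RAW_EVENT_MASK_NB | ARCH_PERFMON_EVENTSEL_INT));
}

int main(void)
{
	uint64_t ev = 0xe0 | (0x7ULL << 8);	/* an NB event, umask 0x7 */

	printf("plain NB event:   %s\n", nb_config_ok(ev) ? "ok" : "-EINVAL");
	printf("with edge detect: %s\n",
	       nb_config_ok(ev | ARCH_PERFMON_EVENTSEL_EDGE) ? "ok" : "-EINVAL");
	return 0;
}

Counter mask, invert, edge and Host/Guest-only bits all fall outside the
allowed mask, which is how the "not supported" list in the patch comment is
enforced.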