Diffstat (limited to 'tools/perf/util/symbol.c')
 tools/perf/util/symbol.c | 1207 +++++++++++++++++++++++++++++++++-------------
 1 file changed, 760 insertions(+), 447 deletions(-)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5ddf84dcbae7..8b30c6f16a9e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -18,6 +18,12 @@
#include "annotate.h"
#include "build-id.h"
#include "cap.h"
+#include "cpumap.h"
+#include "debug.h"
+#include "demangle-cxx.h"
+#include "demangle-java.h"
+#include "demangle-ocaml.h"
+#include "demangle-rust-v0.h"
#include "dso.h"
#include "util.h" // lsdir()
#include "debug.h"
@@ -27,6 +33,7 @@
#include "symbol.h"
#include "map_symbol.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "symsrc.h"
#include "strlist.h"
#include "intlist.h"
@@ -34,6 +41,7 @@
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
+#include <linux/log2.h>
#include <linux/zalloc.h>
#include <elf.h>
@@ -63,6 +71,16 @@ struct symbol_conf symbol_conf = {
.res_sample = 0,
};
+struct map_list_node {
+ struct list_head node;
+ struct map *map;
+};
+
+static struct map_list_node *map_list_node__new(void)
+{
+ return malloc(sizeof(struct map_list_node));
+}
+
static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__KALLSYMS,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
@@ -73,6 +91,7 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__GNU_DEBUGDATA,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
@@ -85,10 +104,12 @@ static enum dso_binary_type binary_type_symtab[] = {
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
-static bool symbol_type__filter(char symbol_type)
+static bool symbol_type__filter(char __symbol_type)
{
- symbol_type = toupper(symbol_type);
- return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
+ // Since 'U' == undefined and 'u' == unique global symbol, we can't rely on toupper() alone here
+ char symbol_type = toupper(__symbol_type);
+ return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B' ||
+ __symbol_type == 'u' || __symbol_type == 'l';
}
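A quick illustration of the new filter's behavior (editorial sketch, not part of the patch; assumes nm(1)-style type characters):

	/* Sketch: what symbol_type__filter() now accepts. */
	assert(symbol_type__filter('T'));   /* text, matched case-insensitively */
	assert(symbol_type__filter('u'));   /* unique global, lowercase form only */
	assert(symbol_type__filter('l'));   /* newly accepted verbatim */
	assert(!symbol_type__filter('U'));  /* undefined symbols stay rejected */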
static int prefix_underscores_count(const char *str)
@@ -101,11 +122,6 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
-void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
- p->end = c->start;
-}
-
const char * __weak arch__normalize_symbol_name(const char *name)
{
return name;
@@ -148,6 +164,13 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
else if ((a == 0) && (b > 0))
return SYMBOL_B;
+ if (syma->type != symb->type) {
+ if (syma->type == STT_NOTYPE)
+ return SYMBOL_B;
+ if (symb->type == STT_NOTYPE)
+ return SYMBOL_A;
+ }
+
/* Prefer a non weak symbol over a weak one */
a = syma->binding == STB_WEAK;
b = symb->binding == STB_WEAK;
@@ -197,19 +220,22 @@ void symbols__fixup_duplicate(struct rb_root_cached *symbols)
curr = rb_entry(nd, struct symbol, rb_node);
again:
nd = rb_next(&curr->rb_node);
- next = rb_entry(nd, struct symbol, rb_node);
-
if (!nd)
break;
+ next = rb_entry(nd, struct symbol, rb_node);
if (curr->start != next->start)
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
+ if (next->type == STT_GNU_IFUNC)
+ curr->ifunc_alias = true;
rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
+ if (curr->type == STT_GNU_IFUNC)
+ next->ifunc_alias = true;
nd = rb_next(&curr->rb_node);
rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
@@ -217,7 +243,8 @@ again:
}
}
-void symbols__fixup_end(struct rb_root_cached *symbols)
+/* Update zero-sized symbols using the address of the next symbol */
+void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{
struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
@@ -231,36 +258,51 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
prev = curr;
curr = rb_entry(nd, struct symbol, rb_node);
- if (prev->end == prev->start && prev->end != curr->start)
- arch__symbols__fixup_end(prev, curr);
- }
-
- /* Last entry */
- if (curr->end == curr->start)
- curr->end = roundup(curr->start, 4096) + 4096;
-}
+ /*
+ * On some architectures the kernel text segment is located at a
+ * low memory address, while modules are located at high memory
+ * addresses (or vice versa). The gap between the end of the
+ * kernel text segment and the beginning of the first module's
+ * text segment is very big. Therefore do not fill this gap and
+ * do not assign it to the kernel dso map (kallsyms).
+ *
+ * Also, BPF code can be allocated separately from text segments
+ * and modules, so the last entry in a module should not fill the
+ * gap either.
+ *
+ * In kallsyms, module symbols are identified by a '[' suffix,
+ * as in:
+ * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
+ */
+ if (prev->end == prev->start) {
+ const char *prev_mod;
+ const char *curr_mod;
-void maps__fixup_end(struct maps *maps)
-{
- struct map *prev = NULL, *curr;
+ if (!is_kallsyms) {
+ prev->end = curr->start;
+ continue;
+ }
- down_write(&maps->lock);
+ prev_mod = strchr(prev->name, '[');
+ curr_mod = strchr(curr->name, '[');
- maps__for_each_entry(maps, curr) {
- if (prev != NULL && !prev->end)
- prev->end = curr->start;
+ /* Last kernel/module symbol mapped to end of page */
+ if (!prev_mod != !curr_mod)
+ prev->end = roundup(prev->end + 4096, 4096);
+ /* Last symbol in the previous module */
+ else if (prev_mod && strcmp(prev_mod, curr_mod))
+ prev->end = roundup(prev->end + 4096, 4096);
+ else
+ prev->end = curr->start;
- prev = curr;
+ pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
+ __func__, prev->name, prev->end);
+ }
}
- /*
- * We still haven't the actual symbols, so guess the
- * last map final address.
- */
- if (curr && !curr->end)
- curr->end = ~0ULL;
-
- up_write(&maps->lock);
+ /* Last entry */
+ if (curr->end == curr->start)
+ curr->end = roundup(curr->start, 4096) + 4096;
}
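A worked example of the page rounding above (editorial sketch, made-up address):

	/*
	 * A module's last symbol with start == end == 0xffffffffc1937000
	 * must not extend into the next, unrelated region, so it is
	 * rounded up to the end of its page:
	 *
	 *   roundup(0xffffffffc1937000 + 4096, 4096) == 0xffffffffc1938000
	 */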
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
@@ -274,7 +316,7 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *
if (symbol_conf.priv_size) {
if (symbol_conf.init_annotation) {
struct annotation *notes = (void *)sym;
- pthread_mutex_init(&notes->lock, NULL);
+ annotation__init(notes);
}
sym = ((void *)sym) + symbol_conf.priv_size;
}
@@ -294,6 +336,13 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *
void symbol__delete(struct symbol *sym)
{
+ if (symbol_conf.priv_size) {
+ if (symbol_conf.init_annotation) {
+ struct annotation *notes = symbol__annotation(sym);
+
+ annotation__exit(notes);
+ }
+ }
free(((void *)sym) - symbol_conf.priv_size);
}
@@ -402,38 +451,35 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
+static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
{
- struct rb_node **p = &symbols->rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct symbol_name_rb_node *symn, *s;
- bool leftmost = true;
+ const struct symbol *lhs = *((const struct symbol **)vlhs);
+ const struct symbol *rhs = *((const struct symbol **)vrhs);
- symn = container_of(sym, struct symbol_name_rb_node, sym);
-
- while (*p != NULL) {
- parent = *p;
- s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
- if (strcmp(sym->name, s->sym.name) < 0)
- p = &(*p)->rb_left;
- else {
- p = &(*p)->rb_right;
- leftmost = false;
- }
- }
- rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
+ return strcmp(lhs->name, rhs->name);
}
-static void symbols__sort_by_name(struct rb_root_cached *symbols,
- struct rb_root_cached *source)
+static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
{
struct rb_node *nd;
+ struct symbol **result;
+ size_t i = 0, size = 0;
+
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
+ size++;
+
+ result = malloc(sizeof(*result) * size);
+ if (!result)
+ return NULL;
for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
- symbols__insert_by_name(symbols, pos);
+
+ result[i++] = pos;
}
+ qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
+ *len = size;
+ return result;
}
int symbol__match_symbol_name(const char *name, const char *str,
@@ -453,86 +499,103 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
-static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
+static struct symbol *symbols__find_by_name(struct symbol *symbols[],
+ size_t symbols_len,
const char *name,
- enum symbol_tag_include includes)
+ enum symbol_tag_include includes,
+ size_t *found_idx)
{
- struct rb_node *n;
- struct symbol_name_rb_node *s = NULL;
+ size_t i, lower = 0, upper = symbols_len;
+ struct symbol *s = NULL;
- if (symbols == NULL)
- return NULL;
+ if (found_idx)
+ *found_idx = SIZE_MAX;
- n = symbols->rb_root.rb_node;
+ if (!symbols_len)
+ return NULL;
- while (n) {
+ while (lower < upper) {
int cmp;
- s = rb_entry(n, struct symbol_name_rb_node, rb_node);
- cmp = symbol__match_symbol_name(s->sym.name, name, includes);
+ i = (lower + upper) / 2;
+ cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);
if (cmp > 0)
- n = n->rb_left;
+ upper = i;
else if (cmp < 0)
- n = n->rb_right;
- else
+ lower = i + 1;
+ else {
+ if (found_idx)
+ *found_idx = i;
+ s = symbols[i];
break;
+ }
}
-
- if (n == NULL)
- return NULL;
-
- if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
+ if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
/* return first symbol that has same name (if any) */
- for (n = rb_prev(n); n; n = rb_prev(n)) {
- struct symbol_name_rb_node *tmp;
-
- tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
- if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
+ for (; i > 0; i--) {
+ struct symbol *tmp = symbols[i - 1];
+
+ if (!arch__compare_symbol_names(tmp->name, s->name)) {
+ if (found_idx)
+ *found_idx = i - 1;
+ s = tmp;
+ } else
break;
-
- s = tmp;
}
-
- return &s->sym;
+ }
+ assert(!found_idx || !s || s == symbols[*found_idx]);
+ return s;
}
void dso__reset_find_symbol_cache(struct dso *dso)
{
- dso->last_find_result.addr = 0;
- dso->last_find_result.symbol = NULL;
+ dso__set_last_find_result_addr(dso, 0);
+ dso__set_last_find_result_symbol(dso, NULL);
}
void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
- __symbols__insert(&dso->symbols, sym, dso->kernel);
+ __symbols__insert(dso__symbols(dso), sym, dso__kernel(dso));
/* update the symbol cache if necessary */
- if (dso->last_find_result.addr >= sym->start &&
- (dso->last_find_result.addr < sym->end ||
+ if (dso__last_find_result_addr(dso) >= sym->start &&
+ (dso__last_find_result_addr(dso) < sym->end ||
sym->start == sym->end)) {
- dso->last_find_result.symbol = sym;
+ dso__set_last_find_result_symbol(dso, sym);
}
}
+void dso__delete_symbol(struct dso *dso, struct symbol *sym)
+{
+ rb_erase_cached(&sym->rb_node, dso__symbols(dso));
+ symbol__delete(sym);
+ dso__reset_find_symbol_cache(dso);
+}
+
struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
- if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
- dso->last_find_result.addr = addr;
- dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
+ if (dso__last_find_result_addr(dso) != addr || dso__last_find_result_symbol(dso) == NULL) {
+ dso__set_last_find_result_addr(dso, addr);
+ dso__set_last_find_result_symbol(dso, symbols__find(dso__symbols(dso), addr));
}
- return dso->last_find_result.symbol;
+ return dso__last_find_result_symbol(dso);
+}
+
+struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
+{
+ return symbols__find(dso__symbols(dso), addr);
}
struct symbol *dso__first_symbol(struct dso *dso)
{
- return symbols__first(&dso->symbols);
+ return symbols__first(dso__symbols(dso));
}
struct symbol *dso__last_symbol(struct dso *dso)
{
- return symbols__last(&dso->symbols);
+ return symbols__last(dso__symbols(dso));
}
struct symbol *dso__next_symbol(struct symbol *sym)
@@ -540,31 +603,43 @@ struct symbol *dso__next_symbol(struct symbol *sym)
return symbols__next(sym);
}
-struct symbol *symbol__next_by_name(struct symbol *sym)
+struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
{
- struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
- struct rb_node *n = rb_next(&s->rb_node);
+ if (*idx + 1 >= dso__symbol_names_len(dso))
+ return NULL;
- return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+ ++*idx;
+ return dso__symbol_names(dso)[*idx];
}
/*
* Returns first symbol that matched with @name.
*/
-struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
+struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
{
- struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
- SYMBOL_TAG_INCLUDE__NONE);
- if (!s)
- s = symbols__find_by_name(&dso->symbol_names, name,
- SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
+ struct symbol *s = symbols__find_by_name(dso__symbol_names(dso),
+ dso__symbol_names_len(dso),
+ name, SYMBOL_TAG_INCLUDE__NONE, idx);
+ if (!s) {
+ s = symbols__find_by_name(dso__symbol_names(dso), dso__symbol_names_len(dso),
+ name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
+ }
return s;
}
void dso__sort_by_name(struct dso *dso)
{
- dso__set_sorted_by_name(dso);
- return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
+ mutex_lock(dso__lock(dso));
+ if (!dso__sorted_by_name(dso)) {
+ size_t len;
+
+ dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len));
+ if (dso__symbol_names(dso)) {
+ dso__set_symbol_names_len(dso, len);
+ dso__set_sorted_by_name(dso);
+ }
+ }
+ mutex_unlock(dso__lock(dso));
}
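A caller sketch for the array-based name lookup introduced above (hypothetical code, not from this patch):

	/* Sketch: list every symbol named "schedule", aliases included. */
	size_t idx;
	struct symbol *sym;

	dso__sort_by_name(dso);	/* builds the sorted array once, under the dso lock */
	for (sym = dso__find_symbol_by_name(dso, "schedule", &idx);
	     sym && !strcmp(sym->name, "schedule");
	     sym = dso__next_symbol_by_name(dso, &idx))
		pr_debug("%#" PRIx64 " %s\n", sym->start, sym->name);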
/*
@@ -657,15 +732,19 @@ static bool symbol__is_idle(const char *name)
"cpu_startup_entry",
"idle_cpu",
"intel_idle",
+ "intel_idle_ibrs",
"default_idle",
"native_safe_halt",
"enter_idle",
"exit_idle",
"mwait_idle",
"mwait_idle_with_hints",
+ "mwait_idle_with_hints.constprop.0",
"poll_idle",
"ppc64_runlatch_off",
"pseries_dedicated_idle_sleep",
+ "psw_idle",
+ "psw_idle_exit",
NULL
};
int i;
@@ -687,11 +766,15 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
- struct rb_root_cached *root = &dso->symbols;
+ struct rb_root_cached *root = dso__symbols(dso);
if (!symbol_type__filter(type))
return 0;
+ /* Ignore local symbols for ARM modules */
+ if (name[0] == '$')
+ return 0;
+
/*
* module symbols are not sorted so we add all
* symbols, setting length to 0, and rely on
@@ -721,11 +804,10 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
- struct map *curr_map;
struct symbol *pos;
int count = 0;
- struct rb_root_cached old_root = dso->symbols;
- struct rb_root_cached *root = &dso->symbols;
+ struct rb_root_cached *root = dso__symbols(dso);
+ struct rb_root_cached old_root = *root;
struct rb_node *next = rb_first_cached(root);
if (!kmaps)
@@ -734,6 +816,8 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
*root = RB_ROOT_CACHED;
while (next) {
+ struct map *curr_map;
+ struct dso *curr_map_dso;
char *module;
pos = rb_entry(next, struct symbol, rb_node);
@@ -751,18 +835,19 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
symbol__delete(pos);
continue;
}
-
- pos->start -= curr_map->start - curr_map->pgoff;
- if (pos->end > curr_map->end)
- pos->end = curr_map->end;
+ curr_map_dso = map__dso(curr_map);
+ pos->start -= map__start(curr_map) - map__pgoff(curr_map);
+ if (pos->end > map__end(curr_map))
+ pos->end = map__end(curr_map);
if (pos->end)
- pos->end -= curr_map->start - curr_map->pgoff;
- symbols__insert(&curr_map->dso->symbols, pos);
+ pos->end -= map__start(curr_map) - map__pgoff(curr_map);
+ symbols__insert(dso__symbols(curr_map_dso), pos);
++count;
+ map__put(curr_map);
}
/* Symbols have been adjusted */
- dso->adjust_symbols = 1;
+ dso__set_adjust_symbols(dso, true);
return count;
}
@@ -776,10 +861,10 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
struct map *initial_map)
{
struct machine *machine;
- struct map *curr_map = initial_map;
+ struct map *curr_map = map__get(initial_map);
struct symbol *pos;
int count = 0, moved = 0;
- struct rb_root_cached *root = &dso->symbols;
+ struct rb_root_cached *root = dso__symbols(dso);
struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@@ -787,7 +872,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
if (!kmaps)
return -1;
- machine = kmaps->machine;
+ machine = maps__machine(kmaps);
x86_64 = machine__is(machine, "x86_64");
@@ -799,14 +884,16 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
module = strchr(pos->name, '\t');
if (module) {
+ struct dso *curr_map_dso;
+
if (!symbol_conf.use_modules)
goto discard_symbol;
*module++ = '\0';
-
- if (strcmp(curr_map->dso->short_name, module)) {
- if (curr_map != initial_map &&
- dso->kernel == DSO_TYPE_GUEST_KERNEL &&
+ curr_map_dso = map__dso(curr_map);
+ if (strcmp(dso__short_name(curr_map_dso), module)) {
+ if (!RC_CHK_EQUAL(curr_map, initial_map) &&
+ dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(machine)) {
/*
* We assume all symbols of a module are
@@ -815,20 +902,21 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
* symbols are in its kmap. Mark it as
* loaded.
*/
- dso__set_loaded(curr_map->dso);
+ dso__set_loaded(curr_map_dso);
}
+ map__zput(curr_map);
curr_map = maps__find_by_name(kmaps, module);
if (curr_map == NULL) {
pr_debug("%s/proc/{kallsyms,modules} "
"inconsistency while looking "
"for \"%s\" module!\n",
machine->root_dir, module);
- curr_map = initial_map;
+ curr_map = map__get(initial_map);
goto discard_symbol;
}
-
- if (curr_map->dso->loaded &&
+ curr_map_dso = map__dso(curr_map);
+ if (dso__loaded(curr_map_dso) &&
!machine__is_default_guest(machine))
goto discard_symbol;
}
@@ -836,8 +924,8 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
* So that we look just like we get from .ko files,
* i.e. not prelinked, relative to initial_map->start.
*/
- pos->start = curr_map->map_ip(curr_map, pos->start);
- pos->end = curr_map->map_ip(curr_map, pos->end);
+ pos->start = map__map_ip(curr_map, pos->start);
+ pos->end = map__map_ip(curr_map, pos->end);
} else if (x86_64 && is_entry_trampoline(pos->name)) {
/*
* These symbols are not needed anymore since the
@@ -848,7 +936,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
* symbols at this point.
*/
goto discard_symbol;
- } else if (curr_map != initial_map) {
+ } else if (!RC_CHK_EQUAL(curr_map, initial_map)) {
char dso_name[PATH_MAX];
struct dso *ndso;
@@ -859,11 +947,12 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
}
if (count == 0) {
- curr_map = initial_map;
+ map__zput(curr_map);
+ curr_map = map__get(initial_map);
goto add_symbol;
}
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
snprintf(dso_name, sizeof(dso_name),
"[guest.kernel].%d",
kernel_range++);
@@ -873,10 +962,11 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
kernel_range++);
ndso = dso__new(dso_name);
+ map__zput(curr_map);
if (ndso == NULL)
return -1;
- ndso->kernel = dso->kernel;
+ dso__set_kernel(ndso, dso__kernel(dso));
curr_map = map__new2(pos->start, ndso);
if (curr_map == NULL) {
@@ -884,8 +974,12 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
return -1;
}
- curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
- maps__insert(kmaps, curr_map);
+ map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
+ if (maps__insert(kmaps, curr_map)) {
+ map__zput(curr_map);
+ dso__put(ndso);
+ return -1;
+ }
++kernel_range;
} else if (delta) {
/* Kernel was relocated at boot time */
@@ -893,9 +987,11 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
pos->end -= delta;
}
add_symbol:
- if (curr_map != initial_map) {
+ if (!RC_CHK_EQUAL(curr_map, initial_map)) {
+ struct dso *curr_map_dso = map__dso(curr_map);
+
rb_erase_cached(&pos->rb_node, root);
- symbols__insert(&curr_map->dso->symbols, pos);
+ symbols__insert(dso__symbols(curr_map_dso), pos);
++moved;
} else
++count;
@@ -906,12 +1002,12 @@ discard_symbol:
symbol__delete(pos);
}
- if (curr_map != initial_map &&
- dso->kernel == DSO_TYPE_GUEST_KERNEL &&
- machine__is_default_guest(kmaps->machine)) {
- dso__set_loaded(curr_map->dso);
+ if (!RC_CHK_EQUAL(curr_map, initial_map) &&
+ dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
+ machine__is_default_guest(maps__machine(kmaps))) {
+ dso__set_loaded(map__dso(curr_map));
}
-
+ map__put(curr_map);
return count + moved;
}
@@ -1070,31 +1166,35 @@ out_delete_from:
return ret;
}
+static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
+{
+ struct rb_root *modules = data;
+ struct module_info *mi;
+ struct dso *dso;
+
+ if (!__map__is_kmodule(old_map))
+ return 0;
+
+ dso = map__dso(old_map);
+ /* Module must be in memory at the same address */
+ mi = find_module(dso__short_name(dso), modules);
+ if (!mi || mi->start != map__start(old_map))
+ return -EINVAL;
+
+ return 0;
+}
+
static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
struct rb_root modules = RB_ROOT;
- struct map *old_map;
int err;
err = read_proc_modules(filename, &modules);
if (err)
return err;
- maps__for_each_entry(kmaps, old_map) {
- struct module_info *mi;
-
- if (!__map__is_kmodule(old_map)) {
- continue;
- }
+ err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);
- /* Module must be in memory at the same address */
- mi = find_module(old_map->dso->short_name, &modules);
- if (!mi || mi->start != old_map->start) {
- err = -EINVAL;
- goto out;
- }
- }
-out:
delete_modules(&modules);
return err;
}
@@ -1172,97 +1272,34 @@ struct kcore_mapfn_data {
static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
struct kcore_mapfn_data *md = data;
- struct map *map;
+ struct map_list_node *list_node = map_list_node__new();
- map = map__new2(start, md->dso);
- if (map == NULL)
+ if (!list_node)
return -ENOMEM;
- map->end = map->start + len;
- map->pgoff = pgoff;
+ list_node->map = map__new2(start, md->dso);
+ if (!list_node->map) {
+ free(list_node);
+ return -ENOMEM;
+ }
+
+ map__set_end(list_node->map, map__start(list_node->map) + len);
+ map__set_pgoff(list_node->map, pgoff);
- list_add(&map->node, &md->maps);
+ list_add(&list_node->node, &md->maps);
return 0;
}
-/*
- * Merges map into maps by splitting the new map within the existing map
- * regions.
- */
-int maps__merge_in(struct maps *kmaps, struct map *new_map)
+static bool remove_old_maps(struct map *map, void *data)
{
- struct map *old_map;
- LIST_HEAD(merged);
-
- maps__for_each_entry(kmaps, old_map) {
- /* no overload with this one */
- if (new_map->end < old_map->start ||
- new_map->start >= old_map->end)
- continue;
+ const struct map *map_to_save = data;
- if (new_map->start < old_map->start) {
- /*
- * |new......
- * |old....
- */
- if (new_map->end < old_map->end) {
- /*
- * |new......| -> |new..|
- * |old....| -> |old....|
- */
- new_map->end = old_map->start;
- } else {
- /*
- * |new.............| -> |new..| |new..|
- * |old....| -> |old....|
- */
- struct map *m = map__clone(new_map);
-
- if (!m)
- return -ENOMEM;
-
- m->end = old_map->start;
- list_add_tail(&m->node, &merged);
- new_map->pgoff += old_map->end - new_map->start;
- new_map->start = old_map->end;
- }
- } else {
- /*
- * |new......
- * |old....
- */
- if (new_map->end < old_map->end) {
- /*
- * |new..| -> x
- * |old.........| -> |old.........|
- */
- map__put(new_map);
- new_map = NULL;
- break;
- } else {
- /*
- * |new......| -> |new...|
- * |old....| -> |old....|
- */
- new_map->pgoff += old_map->end - new_map->start;
- new_map->start = old_map->end;
- }
- }
- }
-
- while (!list_empty(&merged)) {
- old_map = list_entry(merged.next, struct map, node);
- list_del_init(&old_map->node);
- maps__insert(kmaps, old_map);
- map__put(old_map);
- }
-
- if (new_map) {
- maps__insert(kmaps, new_map);
- map__put(new_map);
- }
- return 0;
+ /*
+ * We need to preserve eBPF maps even if they are covered by kcore,
+ * because we need to access eBPF dso for source data.
+ */
+ return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}
static int dso__load_kcore(struct dso *dso, struct map *map,
@@ -1270,7 +1307,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
- struct map *old_map, *new_map, *replacement_map = NULL, *next;
+ struct map *map_ref, *replacement_map = NULL;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1280,7 +1317,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
if (!kmaps)
return -EINVAL;
- machine = kmaps->machine;
+ machine = maps__machine(kmaps);
/* This function requires that the map is the kernel map */
if (!__map__is_kernel(map))
@@ -1305,11 +1342,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Read new maps into temporary lists */
- err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
+ err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
&is_64_bit);
if (err)
goto out_err;
- dso->is_64_bit = is_64_bit;
+ dso__set_is_64_bit(dso, is_64_bit);
if (list_empty(&md.maps)) {
err = -EINVAL;
@@ -1317,55 +1354,75 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
- maps__for_each_entry_safe(kmaps, old_map, next) {
- /*
- * We need to preserve eBPF maps even if they are
- * covered by kcore, because we need to access
- * eBPF dso for source data.
- */
- if (old_map != map && !__map__is_bpf_prog(old_map))
- maps__remove(kmaps, old_map);
- }
+ maps__remove_maps(kmaps, remove_old_maps, map);
machine->trampolines_mapped = false;
/* Find the kernel map using the '_stext' symbol */
if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
- list_for_each_entry(new_map, &md.maps, node) {
- if (stext >= new_map->start && stext < new_map->end) {
+ u64 replacement_size = 0;
+ struct map_list_node *new_node;
+
+ list_for_each_entry(new_node, &md.maps, node) {
+ struct map *new_map = new_node->map;
+ u64 new_size = map__size(new_map);
+
+ if (!(stext >= map__start(new_map) && stext < map__end(new_map)))
+ continue;
+
+ /*
+ * On some architectures, ARM64 for example, the kernel
+ * text can get allocated inside of the vmalloc segment.
+ * Select the smallest matching segment, in case stext
+ * falls within more than one in the list.
+ */
+ if (!replacement_map || new_size < replacement_size) {
replacement_map = new_map;
- break;
+ replacement_size = new_size;
}
}
}
if (!replacement_map)
- replacement_map = list_entry(md.maps.next, struct map, node);
+ replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
+
+ /*
+ * Update addresses of vmlinux map. Re-insert it to ensure maps are
+ * correctly ordered. Do this before using maps__merge_in() for the
+ * remaining maps so vmlinux gets split if necessary.
+ */
+ map_ref = map__get(map);
+ maps__remove(kmaps, map_ref);
+
+ map__set_start(map_ref, map__start(replacement_map));
+ map__set_end(map_ref, map__end(replacement_map));
+ map__set_pgoff(map_ref, map__pgoff(replacement_map));
+ map__set_mapping_type(map_ref, map__mapping_type(replacement_map));
+
+ err = maps__insert(kmaps, map_ref);
+ map__put(map_ref);
+ if (err)
+ goto out_err;
/* Add new maps */
while (!list_empty(&md.maps)) {
- new_map = list_entry(md.maps.next, struct map, node);
- list_del_init(&new_map->node);
- if (new_map == replacement_map) {
- map->start = new_map->start;
- map->end = new_map->end;
- map->pgoff = new_map->pgoff;
- map->map_ip = new_map->map_ip;
- map->unmap_ip = new_map->unmap_ip;
- /* Ensure maps are correctly ordered */
- map__get(map);
- maps__remove(kmaps, map);
- maps__insert(kmaps, map);
- map__put(map);
- map__put(new_map);
- } else {
+ struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
+ struct map *new_map = new_node->map;
+
+ list_del_init(&new_node->node);
+
+ /* Skip replacement_map: it was already inserted above */
+ if (!RC_CHK_EQUAL(new_map, replacement_map)) {
/*
* Merge kcore map into existing maps,
* and ensure that current maps (eBPF)
* stay intact.
*/
- if (maps__merge_in(kmaps, new_map))
+ if (maps__merge_in(kmaps, new_map)) {
+ err = -EINVAL;
goto out_err;
+ }
}
+ free(new_node);
}
if (machine__is(machine, "x86_64")) {
@@ -1385,15 +1442,15 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
* Set the data type and long name so that kcore can be read via
* dso__data_read_addr().
*/
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
- dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE);
else
- dso->binary_type = DSO_BINARY_TYPE__KCORE;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE);
dso__set_long_name(dso, strdup(kcore_filename), true);
close(fd);
- if (map->prot & PROT_EXEC)
+ if (map__prot(map) & PROT_EXEC)
pr_debug("Using %s for kernel object code\n", kcore_filename);
else
pr_debug("Using %s for kernel data\n", kcore_filename);
@@ -1402,12 +1459,15 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
out_err:
while (!list_empty(&md.maps)) {
- map = list_entry(md.maps.next, struct map, node);
- list_del_init(&map->node);
- map__put(map);
+ struct map_list_node *list_node;
+
+ list_node = list_entry(md.maps.next, struct map_list_node, node);
+ list_del_init(&list_node->node);
+ map__zput(list_node->map);
+ free(list_node);
}
close(fd);
- return -EINVAL;
+ return err;
}
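A worked example for the smallest-segment selection above (editorial sketch, made-up addresses):

	/*
	 * If kcore reports two executable segments that both contain
	 * _stext, e.g.
	 *
	 *   [0xffff800008000000, 0xffff800780000000)  vmalloc-spanning map
	 *   [0xffff800008000000, 0xffff800009800000)  kernel text proper
	 *
	 * the second, smaller one becomes replacement_map.
	 */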
/*
@@ -1446,13 +1506,13 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
if (kallsyms__delta(kmap, filename, &delta))
return -1;
- symbols__fixup_end(&dso->symbols);
- symbols__fixup_duplicate(&dso->symbols);
+ symbols__fixup_end(dso__symbols(dso), true);
+ symbols__fixup_duplicate(dso__symbols(dso));
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
- dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
+ dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
else
- dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
+ dso__set_symtab_type(dso, DSO_BINARY_TYPE__KALLSYMS);
if (!no_kcore && !dso__load_kcore(dso, map, filename))
return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
@@ -1508,7 +1568,7 @@ static int dso__load_perf_map(const char *map_path, struct dso *dso)
if (sym == NULL)
goto out_delete_line;
- symbols__insert(&dso->symbols, sym);
+ symbols__insert(dso__symbols(dso), sym);
nr_syms++;
}
@@ -1523,6 +1583,137 @@ out_failure:
return -1;
}
+#ifdef HAVE_LIBBFD_SUPPORT
+#define PACKAGE 'perf'
+#include <bfd.h>
+
+static int bfd_symbols__cmpvalue(const void *a, const void *b)
+{
+ const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
+
+ if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
+ return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
+
+ return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
+}
+
+static int bfd2elf_binding(asymbol *symbol)
+{
+ if (symbol->flags & BSF_WEAK)
+ return STB_WEAK;
+ if (symbol->flags & BSF_GLOBAL)
+ return STB_GLOBAL;
+ if (symbol->flags & BSF_LOCAL)
+ return STB_LOCAL;
+ return -1;
+}
+
+int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
+{
+ int err = -1;
+ long symbols_size, symbols_count, i;
+ asection *section;
+ asymbol **symbols, *sym;
+ struct symbol *symbol;
+ bfd *abfd;
+ u64 start, len;
+
+ abfd = bfd_openr(debugfile, NULL);
+ if (!abfd)
+ return -1;
+
+ if (!bfd_check_format(abfd, bfd_object)) {
+ pr_debug2("%s: cannot read %s bfd file.\n", __func__,
+ dso__long_name(dso));
+ goto out_close;
+ }
+
+ if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
+ goto out_close;
+
+ symbols_size = bfd_get_symtab_upper_bound(abfd);
+ if (symbols_size == 0) {
+ bfd_close(abfd);
+ return 0;
+ }
+
+ if (symbols_size < 0)
+ goto out_close;
+
+ symbols = malloc(symbols_size);
+ if (!symbols)
+ goto out_close;
+
+ symbols_count = bfd_canonicalize_symtab(abfd, symbols);
+ if (symbols_count < 0)
+ goto out_free;
+
+ section = bfd_get_section_by_name(abfd, ".text");
+ if (section) {
+ for (i = 0; i < symbols_count; ++i) {
+ if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
+ !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
+ break;
+ }
+ if (i < symbols_count) {
+ /* PE symbol values are only 4 bytes wide, so recover the high bits from the .text vma */
+ u64 text_offset = (section->vma - (u32)section->vma)
+ + (u32)bfd_asymbol_value(symbols[i]);
+ dso__set_text_offset(dso, text_offset);
+ dso__set_text_end(dso, (section->vma - text_offset) + section->size);
+ } else {
+ dso__set_text_offset(dso, section->vma - section->filepos);
+ dso__set_text_end(dso, section->filepos + section->size);
+ }
+ }
+
+ qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
+
+#ifdef bfd_get_section
+#define bfd_asymbol_section bfd_get_section
+#endif
+ for (i = 0; i < symbols_count; ++i) {
+ sym = symbols[i];
+ section = bfd_asymbol_section(sym);
+ if (bfd2elf_binding(sym) < 0)
+ continue;
+
+ while (i + 1 < symbols_count &&
+ bfd_asymbol_section(symbols[i + 1]) == section &&
+ bfd2elf_binding(symbols[i + 1]) < 0)
+ i++;
+
+ if (i + 1 < symbols_count &&
+ bfd_asymbol_section(symbols[i + 1]) == section)
+ len = symbols[i + 1]->value - sym->value;
+ else
+ len = section->size - sym->value;
+
+ start = bfd_asymbol_value(sym) - dso__text_offset(dso);
+ symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
+ bfd_asymbol_name(sym));
+ if (!symbol)
+ goto out_free;
+
+ symbols__insert(dso__symbols(dso), symbol);
+ }
+#ifdef bfd_get_section
+#undef bfd_asymbol_section
+#endif
+
+ symbols__fixup_end(dso__symbols(dso), false);
+ symbols__fixup_duplicate(dso__symbols(dso));
+ dso__set_adjust_symbols(dso, true);
+
+ err = 0;
+out_free:
+ free(symbols);
+out_close:
+ bfd_close(abfd);
+ return err;
+}
+#endif
+
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
enum dso_binary_type type)
{
@@ -1535,17 +1726,18 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
- return !kmod && dso->kernel == DSO_TYPE_USER;
+ case DSO_BINARY_TYPE__GNU_DEBUGDATA:
+ return !kmod && dso__kernel(dso) == DSO_SPACE__USER;
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__KCORE:
- return dso->kernel == DSO_TYPE_KERNEL;
+ return dso__kernel(dso) == DSO_SPACE__KERNEL;
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__GUEST_KCORE:
- return dso->kernel == DSO_TYPE_GUEST_KERNEL;
+ return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
@@ -1555,7 +1747,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
* kernel modules know their symtab type - it's set when
* creating a module dso in machine__addnew_module_map().
*/
- return kmod && dso->symtab_type == type;
+ return kmod && dso__symtab_type(dso) == type;
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
@@ -1563,6 +1755,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__BPF_IMAGE:
+ case DSO_BINARY_TYPE__OOL:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;
@@ -1585,8 +1778,8 @@ static int dso__find_perf_map(char *filebuf, size_t bufsz,
nsi = *nsip;
- if (nsi->need_setns) {
- snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
+ if (nsinfo__need_setns(nsi)) {
+ snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi));
nsinfo__mountns_enter(nsi, &nsc);
rc = access(filebuf, R_OK);
nsinfo__mountns_exit(&nsc);
@@ -1598,8 +1791,8 @@ static int dso__find_perf_map(char *filebuf, size_t bufsz,
if (nnsi) {
nsinfo__put(nsi);
- nnsi->need_setns = false;
- snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
+ nsinfo__clear_need_setns(nnsi);
+ snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi));
*nsip = nnsi;
rc = 0;
}
@@ -1619,21 +1812,23 @@ int dso__load(struct dso *dso, struct map *map)
struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
bool kmod;
bool perfmap;
- unsigned char build_id[BUILD_ID_SIZE];
+ struct build_id bid;
struct nscookie nsc;
char newmapname[PATH_MAX];
- const char *map_path = dso->long_name;
+ const char *map_path = dso__long_name(dso);
+
+ mutex_lock(dso__lock(dso));
+ perfmap = is_perf_pid_map_name(map_path);
- perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
if (perfmap) {
- if (dso->nsinfo && (dso__find_perf_map(newmapname,
- sizeof(newmapname), &dso->nsinfo) == 0)) {
+ if (dso__nsinfo(dso) &&
+ (dso__find_perf_map(newmapname, sizeof(newmapname),
+ dso__nsinfo_ptr(dso)) == 0)) {
map_path = newmapname;
}
}
- nsinfo__mountns_enter(dso->nsinfo, &nsc);
- pthread_mutex_lock(&dso->lock);
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
/* check again under the dso->lock */
if (dso__loaded(dso)) {
@@ -1641,29 +1836,27 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
- kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
- dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+ kmod = dso__is_kmod(dso);
- if (dso->kernel && !kmod) {
- if (dso->kernel == DSO_TYPE_KERNEL)
+ if (dso__kernel(dso) && !kmod) {
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL)
ret = dso__load_kernel_sym(dso, map);
- else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ else if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
ret = dso__load_guest_kernel_sym(dso, map);
- machine = map__kmaps(map)->machine;
+ machine = maps__machine(map__kmaps(map));
if (machine__is(machine, "x86_64"))
machine__map_x86_64_entry_trampolines(machine, dso);
goto out;
}
- dso->adjust_symbols = 0;
+ dso__set_adjust_symbols(dso, false);
if (perfmap) {
ret = dso__load_perf_map(map_path, dso);
- dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
- DSO_BINARY_TYPE__NOT_FOUND;
+ dso__set_symtab_type(dso, ret > 0
+ ? DSO_BINARY_TYPE__JAVA_JIT
+ : DSO_BINARY_TYPE__NOT_FOUND);
goto out;
}
@@ -1678,11 +1871,11 @@ int dso__load(struct dso *dso, struct map *map)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
- if (!dso->has_build_id &&
- is_regular_file(dso->long_name)) {
- __symbol__join_symfs(name, PATH_MAX, dso->long_name);
- if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
- dso__set_build_id(dso, build_id);
+ if (!dso__has_build_id(dso) &&
+ is_regular_file(dso__long_name(dso))) {
+ __symbol__join_symfs(name, PATH_MAX, dso__long_name(dso));
+ if (filename__read_build_id(name, &bid) > 0)
+ dso__set_build_id(dso, &bid);
}
/*
@@ -1695,6 +1888,7 @@ int dso__load(struct dso *dso, struct map *map)
bool next_slot = false;
bool is_reg;
bool nsexit;
+ int bfdrc = -1;
int sirc = -1;
enum dso_binary_type symtab_type = binary_type_symtab[i];
@@ -1713,11 +1907,29 @@ int dso__load(struct dso *dso, struct map *map)
nsinfo__mountns_exit(&nsc);
is_reg = is_regular_file(name);
+ if (!is_reg && errno == ENOENT && dso__nsinfo(dso)) {
+ char *new_name = dso__filename_with_chroot(dso, name);
+ if (new_name) {
+ is_reg = is_regular_file(new_name);
+ strlcpy(name, new_name, PATH_MAX);
+ free(new_name);
+ }
+ }
+
+#ifdef HAVE_LIBBFD_SUPPORT
if (is_reg)
+ bfdrc = dso__load_bfd_symbols(dso, name);
+#endif
+ if (is_reg && bfdrc < 0)
sirc = symsrc__init(ss, dso, name, symtab_type);
if (nsexit)
- nsinfo__mountns_enter(dso->nsinfo, &nsc);
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+
+ if (bfdrc == 0) {
+ ret = 0;
+ break;
+ }
if (!is_reg || sirc < 0)
continue;
@@ -1725,8 +1937,8 @@ int dso__load(struct dso *dso, struct map *map)
if (!syms_ss && symsrc__has_symtab(ss)) {
syms_ss = ss;
next_slot = true;
- if (!dso->symsrc_filename)
- dso->symsrc_filename = strdup(name);
+ if (!dso__symsrc_filename(dso))
+ dso__set_symsrc_filename(dso, strdup(name));
}
if (!runtime_ss && symsrc__possibly_runtime(ss)) {
@@ -1737,6 +1949,9 @@ int dso__load(struct dso *dso, struct map *map)
if (next_slot) {
ss_pos++;
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
+ dso__set_binary_type(dso, symtab_type);
+
if (syms_ss && runtime_ss)
break;
} else {
@@ -1773,99 +1988,20 @@ int dso__load(struct dso *dso, struct map *map)
symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
free(name);
- if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
+ if (ret < 0 && strstr(dso__name(dso), " (deleted)") != NULL)
ret = 0;
out:
dso__set_loaded(dso);
- pthread_mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
nsinfo__mountns_exit(&nsc);
return ret;
}
-static int map__strcmp(const void *a, const void *b)
-{
- const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b;
- return strcmp(ma->dso->short_name, mb->dso->short_name);
-}
-
-static int map__strcmp_name(const void *name, const void *b)
-{
- const struct map *map = *(const struct map **)b;
- return strcmp(name, map->dso->short_name);
-}
-
-void __maps__sort_by_name(struct maps *maps)
-{
- qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp);
-}
-
-static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
-{
- struct map *map;
- struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map));
- int i = 0;
-
- if (maps_by_name == NULL)
- return -1;
-
- maps->maps_by_name = maps_by_name;
- maps->nr_maps_allocated = maps->nr_maps;
-
- maps__for_each_entry(maps, map)
- maps_by_name[i++] = map;
-
- __maps__sort_by_name(maps);
- return 0;
-}
-
-static struct map *__maps__find_by_name(struct maps *maps, const char *name)
-{
- struct map **mapp;
-
- if (maps->maps_by_name == NULL &&
- map__groups__sort_by_name_from_rbtree(maps))
- return NULL;
-
- mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name);
- if (mapp)
- return *mapp;
- return NULL;
-}
-
-struct map *maps__find_by_name(struct maps *maps, const char *name)
-{
- struct map *map;
-
- down_read(&maps->lock);
-
- if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
- map = maps->last_search_by_name;
- goto out_unlock;
- }
- /*
- * If we have maps->maps_by_name, then the name isn't in the rbtree,
- * as maps->maps_by_name mirrors the rbtree when lookups by name are
- * made.
- */
- map = __maps__find_by_name(maps, name);
- if (map || maps->maps_by_name != NULL)
- goto out_unlock;
-
- /* Fallback to traversing the rbtree... */
- maps__for_each_entry(maps, map)
- if (strcmp(map->dso->short_name, name) == 0) {
- maps->last_search_by_name = map;
- goto out_unlock;
- }
-
- map = NULL;
-
-out_unlock:
- up_read(&maps->lock);
- return map;
-}
-
+/*
+ * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
+ * it returns an error.
+ */
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
{
@@ -1879,23 +2015,31 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
else
symbol__join_symfs(symfs_vmlinux, vmlinux);
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
else
symtab_type = DSO_BINARY_TYPE__VMLINUX;
- if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
+ if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
+ if (vmlinux_allocated)
+ free((char *) vmlinux);
return -1;
+ }
+
+ /*
+ * dso__load_sym() may copy 'dso' which will result in the copies having
+ * an incorrect long name unless we set it here first.
+ */
+ dso__set_long_name(dso, vmlinux, vmlinux_allocated);
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX);
+ else
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX);
err = dso__load_sym(dso, map, &ss, &ss, 0);
symsrc__destroy(&ss);
if (err > 0) {
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
- dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
- else
- dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
- dso__set_long_name(dso, vmlinux, vmlinux_allocated);
dso__set_loaded(dso);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
}
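A caller sketch of the ownership contract noted above (hypothetical path and caller, not from this patch):

	/* Sketch: ownership of an allocated path always moves to the callee. */
	char *path = strdup("/boot/vmlinux");

	if (path) {
		int nr_syms = dso__load_vmlinux(dso, map, path, /*vmlinux_allocated=*/true);

		/*
		 * No free(path) here: it either became the dso's long name
		 * or was already freed on the error path.
		 */
		if (nr_syms > 0)
			pr_debug("loaded %d symbols\n", nr_syms);
	}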
@@ -1923,7 +2067,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map)
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
goto out;
- free(filename);
}
out:
return err;
@@ -1978,12 +2121,12 @@ static bool filename__readable(const char *file)
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
- u8 host_build_id[BUILD_ID_SIZE];
+ struct build_id bid;
char sbuild_id[SBUILD_ID_SIZE];
bool is_host = false;
char path[PATH_MAX];
- if (!dso->has_build_id) {
+ if (!dso__has_build_id(dso)) {
/*
* Last resort, if we don't have a build-id and couldn't find
* any vmlinux file, try the running kernel kallsyms table.
@@ -1991,9 +2134,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
goto proc_kallsyms;
}
- if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
- sizeof(host_build_id)) == 0)
- is_host = dso__build_id_equal(dso, host_build_id);
+ if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
+ is_host = dso__build_id_equal(dso, &bid);
/* Try a fast path for /proc/kallsyms if possible */
if (is_host) {
@@ -2009,7 +2151,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
goto proc_kallsyms;
}
- build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
/* Find kallsyms in build-id cache with kcore */
scnprintf(path, sizeof(path), "%s/%s/%s",
@@ -2039,6 +2181,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map)
int err;
const char *kallsyms_filename = NULL;
char *kallsyms_allocated_filename = NULL;
+ char *filename = NULL;
+
/*
* Step 1: if the user specified a kallsyms or vmlinux filename, use
* it and only it, reporting errors to the user if it cannot be used.
@@ -2063,6 +2207,19 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map)
return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
}
+ /*
+ * Before checking the common vmlinux locations, check if the
+ * kernel is stored as a standard build-id binary (not kallsyms)
+ * under the .debug cache.
+ */
+ if (!symbol_conf.ignore_vmlinux_buildid)
+ filename = __dso__build_id_filename(dso, NULL, 0, false, false);
+ if (filename != NULL) {
+ err = dso__load_vmlinux(dso, map, filename, true);
+ if (err > 0)
+ return err;
+ }
+
if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map);
if (err > 0)
@@ -2086,7 +2243,7 @@ do_kallsyms:
free(kallsyms_allocated_filename);
if (err > 0 && !dso__is_kcore(dso)) {
- dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__KALLSYMS);
dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
map__fixup_start(map);
map__fixup_end(map);
@@ -2098,11 +2255,13 @@ do_kallsyms:
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
int err;
- const char *kallsyms_filename = NULL;
- struct machine *machine = map__kmaps(map)->machine;
+ const char *kallsyms_filename;
+ struct machine *machine = maps__machine(map__kmaps(map));
char path[PATH_MAX];
- if (machine__is_default_guest(machine)) {
+ if (machine->kallsyms_filename) {
+ kallsyms_filename = machine->kallsyms_filename;
+ } else if (machine__is_default_guest(machine)) {
/*
* if the user specified a vmlinux filename, use it and only
* it, reporting errors to the user if it cannot be used.
@@ -2127,7 +2286,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
if (err > 0 && !dso__is_kcore(dso)) {
- dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
dso__set_long_name(dso, machine->mmap_name, false);
map__fixup_start(map);
map__fixup_end(map);
@@ -2240,18 +2399,61 @@ int setup_intlist(struct intlist **list, const char *list_str,
return 0;
}
+static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
+{
+ struct str_node *pos, *tmp;
+ unsigned long val;
+ char *sep;
+ const char *end;
+ int i = 0, err;
+
+ *addr_list = intlist__new(NULL);
+ if (!*addr_list)
+ return -1;
+
+ strlist__for_each_entry_safe(pos, tmp, sym_list) {
+ errno = 0;
+ val = strtoul(pos->s, &sep, 16);
+ if (errno || (sep == pos->s))
+ continue;
+
+ if (*sep != '\0') {
+ end = pos->s + strlen(pos->s) - 1;
+ while (end >= sep && isspace(*end))
+ end--;
+
+ if (end >= sep)
+ continue;
+ }
+
+ err = intlist__add(*addr_list, val);
+ if (err)
+ break;
+
+ strlist__remove(sym_list, pos);
+ i++;
+ }
+
+ if (i == 0) {
+ intlist__delete(*addr_list);
+ *addr_list = NULL;
+ }
+
+ return 0;
+}
+
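For illustration, how the parser above splits a mixed list (editorial sketch, assuming the usual --symbols/-S wiring of sym_list):

	/*
	 * Given sym_list entries "c0ffee" and "main": strtoul(.., 16)
	 * consumes "c0ffee" fully, so 0xc0ffee moves to addr_list, while
	 * "main" fails at the first character and remains a symbol name
	 * in sym_list.
	 */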
static bool symbol__read_kptr_restrict(void)
{
bool value = false;
FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ bool used_root;
+ bool cap_syslog = perf_cap__capable(CAP_SYSLOG, &used_root);
if (fp != NULL) {
char line[8];
if (fgets(line, sizeof(line), fp) != NULL)
- value = perf_cap__capable(CAP_SYSLOG) ?
- (atoi(line) >= 2) :
- (atoi(line) != 0);
+ value = cap_syslog ? (atoi(line) >= 2) : (atoi(line) != 0);
fclose(fp);
}
@@ -2259,7 +2461,7 @@ static bool symbol__read_kptr_restrict(void)
/* Per kernel/kallsyms.c:
* we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
*/
- if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
+ if (perf_event_paranoid() > 1 && !cap_syslog)
value = true;
return value;
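Summarizing the policy this implements (derived from the code above and the cited kernel/kallsyms.c behavior):

	/*
	 * kptr_restrict >= 2                          -> restricted for everyone
	 * kptr_restrict != 0 without CAP_SYSLOG       -> restricted
	 * perf_event_paranoid > 1 without CAP_SYSLOG  -> restricted
	 * otherwise                                   -> unrestricted
	 */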
@@ -2280,6 +2482,36 @@ int symbol__annotation_init(void)
return 0;
}
+static int setup_parallelism_bitmap(void)
+{
+ struct perf_cpu_map *map;
+ struct perf_cpu cpu;
+ int i, err = -1;
+
+ if (symbol_conf.parallelism_list_str == NULL)
+ return 0;
+
+ map = perf_cpu_map__new(symbol_conf.parallelism_list_str);
+ if (map == NULL) {
+ pr_err("failed to parse parallelism filter list\n");
+ return -1;
+ }
+
+ bitmap_fill(symbol_conf.parallelism_filter, MAX_NR_CPUS + 1);
+ perf_cpu_map__for_each_cpu(cpu, i, map) {
+ if (cpu.cpu <= 0 || cpu.cpu > MAX_NR_CPUS) {
+ pr_err("Requested parallelism level %d is invalid.\n", cpu.cpu);
+ goto out_delete_map;
+ }
+ __clear_bit(cpu.cpu, symbol_conf.parallelism_filter);
+ }
+
+ err = 0;
+out_delete_map:
+ perf_cpu_map__put(map);
+ return err;
+}
+
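In other words (editorial sketch with a hypothetical filter list):

	/*
	 * With "2,4-8", bits 2 and 4..8 of parallelism_filter are
	 * cleared while all other bits stay set, so samples at every
	 * other parallelism level are filtered out.
	 */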
int symbol__init(struct perf_env *env)
{
const char *symfs;
@@ -2291,10 +2523,6 @@ int symbol__init(struct perf_env *env)
symbol__elf_init();
- if (symbol_conf.sort_by_name)
- symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
- sizeof(struct symbol));
-
if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
return -1;
@@ -2303,6 +2531,9 @@ int symbol__init(struct perf_env *env)
return -1;
}
+ if (setup_parallelism_bitmap())
+ return -1;
+
if (setup_list(&symbol_conf.dso_list,
symbol_conf.dso_list_str, "dso") < 0)
return -1;
@@ -2323,6 +2554,10 @@ int symbol__init(struct perf_env *env)
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_tid_list;
+ if (symbol_conf.sym_list &&
+ setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
+ goto out_free_sym_list;
+
if (setup_list(&symbol_conf.bt_stop_list,
symbol_conf.bt_stop_list_str, "symbol") < 0)
goto out_free_sym_list;
@@ -2346,6 +2581,7 @@ int symbol__init(struct perf_env *env)
out_free_sym_list:
strlist__delete(symbol_conf.sym_list);
+ intlist__delete(symbol_conf.addr_list);
out_free_tid_list:
intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
@@ -2367,6 +2603,7 @@ void symbol__exit(void)
strlist__delete(symbol_conf.comm_list);
intlist__delete(symbol_conf.tid_list);
intlist__delete(symbol_conf.pid_list);
+ intlist__delete(symbol_conf.addr_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.bt_stop_list = NULL;
@@ -2396,24 +2633,100 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
return 0;
}
-struct mem_info *mem_info__get(struct mem_info *mi)
+/*
+ * Checks that user-supplied kernel symbol files are accessible, because
+ * the default mechanism for accessing ELF files fails silently, i.e. if
+ * debug symbols for a build ID aren't found, perf carries on normally.
+ * When the files are user-supplied we should assume that the user doesn't
+ * want perf to fail silently.
+ */
+int symbol__validate_sym_arguments(void)
{
- if (mi)
- refcount_inc(&mi->refcnt);
- return mi;
+ if (symbol_conf.vmlinux_name &&
+ access(symbol_conf.vmlinux_name, R_OK)) {
+ pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
+ return -EINVAL;
+ }
+ if (symbol_conf.kallsyms_name &&
+ access(symbol_conf.kallsyms_name, R_OK)) {
+ pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
+ return -EINVAL;
+ }
+ return 0;
}
-void mem_info__put(struct mem_info *mi)
+static bool want_demangle(bool is_kernel_sym)
{
- if (mi && refcount_dec_and_test(&mi->refcnt))
- free(mi);
+ return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
+}
+
+/*
+ * Demangle a C++ function signature; typically replaced by the
+ * demangle-cxx.cpp version.
+ */
+#ifndef HAVE_CXA_DEMANGLE_SUPPORT
+char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
+ bool modifiers __maybe_unused)
+{
+#ifdef HAVE_LIBBFD_SUPPORT
+ int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+ return bfd_demangle(NULL, str, flags);
+#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
+ int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+ return cplus_demangle(str, flags);
+#else
+ return NULL;
+#endif
}
+#endif /* !HAVE_CXA_DEMANGLE_SUPPORT */
-struct mem_info *mem_info__new(void)
+char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
- struct mem_info *mi = zalloc(sizeof(*mi));
+ struct demangle rust_demangle = {
+ .style = DemangleStyleUnknown,
+ };
+ char *demangled = NULL;
+
+ /*
+ * We need to figure out if the object was created from C++ sources.
+ * DWARF DW_compile_unit has this, but we don't always have access
+ * to it...
+ */
+ if (!want_demangle((dso && dso__kernel(dso)) || kmodule))
+ return demangled;
+
+ rust_demangle_demangle(elf_name, &rust_demangle);
+ if (rust_demangle_is_known(&rust_demangle)) {
+ /* A rust mangled name. */
+ if (rust_demangle.mangled_len == 0)
+ return demangled;
+
+ for (size_t buf_len = roundup_pow_of_two(rust_demangle.mangled_len * 2);
+ buf_len < 1024 * 1024; buf_len += 32) {
+ char *tmp = realloc(demangled, buf_len);
+
+ if (!tmp) {
+ /* Failure to grow output buffer, return what is there. */
+ return demangled;
+ }
+ demangled = tmp;
+ if (rust_demangle_display_demangle(&rust_demangle, demangled, buf_len,
+ /*alternate=*/true) == OverflowOk)
+ return demangled;
+ }
+ /* Buffer exceeded sensible bounds, return what is there. */
+ return demangled;
+ }
+
+ demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
+ if (demangled)
+ return demangled;
+
+ demangled = ocaml_demangle_sym(elf_name);
+ if (demangled)
+ return demangled;
- if (mi)
- refcount_set(&mi->refcnt, 1);
- return mi;
+ return java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
}