Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/api/fd/array.c                  |    3
-rw-r--r--  tools/lib/argv_split.c                    |  100
-rw-r--r--  tools/lib/bitmap.c                        |    4
-rw-r--r--  tools/lib/bpf/Build                       |    4
-rw-r--r--  tools/lib/bpf/Makefile                    |   36
-rw-r--r--  tools/lib/bpf/README.rst                  |    3
-rw-r--r--  tools/lib/bpf/bpf.c                       |   32
-rw-r--r--  tools/lib/bpf/bpf.h                       |    2
-rw-r--r--  tools/lib/bpf/bpf_prog_linfo.c            |    5
-rw-r--r--  tools/lib/bpf/btf.c                       |  585
-rw-r--r--  tools/lib/bpf/btf.h                       |  202
-rw-r--r--  tools/lib/bpf/btf_dump.c                  | 1287
-rw-r--r--  tools/lib/bpf/hashmap.c                   |  229
-rw-r--r--  tools/lib/bpf/hashmap.h                   |  178
-rw-r--r--  tools/lib/bpf/libbpf.c                    | 2860
-rw-r--r--  tools/lib/bpf/libbpf.h                    |  158
-rw-r--r--  tools/lib/bpf/libbpf.map                  |   26
-rw-r--r--  tools/lib/bpf/libbpf_internal.h           |  131
-rw-r--r--  tools/lib/bpf/libbpf_probes.c             |   15
-rw-r--r--  tools/lib/bpf/libbpf_util.h               |   13
-rw-r--r--  tools/lib/bpf/str_error.c                 |    2
-rw-r--r--  tools/lib/bpf/xsk.c                       |  214
-rw-r--r--  tools/lib/bpf/xsk.h                       |   35
-rw-r--r--  tools/lib/ctype.c                         |   35
-rw-r--r--  tools/lib/find_bit.c                      |    6
-rw-r--r--  tools/lib/rbtree.c                        |   14
-rw-r--r--  tools/lib/string.c                        |   55
-rw-r--r--  tools/lib/symbol/kallsyms.c               |   14
-rw-r--r--  tools/lib/symbol/kallsyms.h               |    2
-rw-r--r--  tools/lib/traceevent/Makefile             |   10
-rw-r--r--  tools/lib/traceevent/event-parse-api.c    |   40
-rw-r--r--  tools/lib/traceevent/event-parse-local.h  |    6
-rw-r--r--  tools/lib/traceevent/event-parse.c        |  391
-rw-r--r--  tools/lib/traceevent/event-parse.h        |   30
-rw-r--r--  tools/lib/traceevent/event-plugin.c       |    2
-rw-r--r--  tools/lib/vsprintf.c                      |   19
-rw-r--r--  tools/lib/zalloc.c                        |   15
37 files changed, 5669 insertions(+), 1094 deletions(-)
diff --git a/tools/lib/api/fd/array.c b/tools/lib/api/fd/array.c
index b0a035fc87b3..58d44d5eee31 100644
--- a/tools/lib/api/fd/array.c
+++ b/tools/lib/api/fd/array.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include "array.h"
#include <errno.h>
diff --git a/tools/lib/argv_split.c b/tools/lib/argv_split.c
new file mode 100644
index 000000000000..0a58ccf3f761
--- /dev/null
+++ b/tools/lib/argv_split.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper function for splitting a string into an argv-like array.
+ */
+
+#include <stdlib.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+
+static const char *skip_arg(const char *cp)
+{
+ while (*cp && !isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static int count_argc(const char *str)
+{
+ int count = 0;
+
+ while (*str) {
+ str = skip_spaces(str);
+ if (*str) {
+ count++;
+ str = skip_arg(str);
+ }
+ }
+
+ return count;
+}
+
+/**
+ * argv_free - free an argv
+ * @argv: the argument vector to be freed
+ *
+ * Frees an argv and the strings it points to.
+ */
+void argv_free(char **argv)
+{
+ char **p;
+ for (p = argv; *p; p++) {
+ free(*p);
+ *p = NULL;
+ }
+
+ free(argv);
+}
+
+/**
+ * argv_split - split a string at whitespace, returning an argv
+ * @str: the string to be split
+ * @argcp: returned argument count
+ *
+ * Returns an array of pointers to strings which are split out from
+ * @str. This is performed by strictly splitting on white-space; no
+ * quote processing is performed. Multiple whitespace characters are
+ * considered to be a single argument separator. The returned array
+ * is always NULL-terminated. Returns NULL on memory allocation
+ * failure.
+ */
+char **argv_split(const char *str, int *argcp)
+{
+ int argc = count_argc(str);
+ char **argv = calloc(argc + 1, sizeof(*argv));
+ char **argvp;
+
+ if (argv == NULL)
+ goto out;
+
+ if (argcp)
+ *argcp = argc;
+
+ argvp = argv;
+
+ while (*str) {
+ str = skip_spaces(str);
+
+ if (*str) {
+ const char *p = str;
+ char *t;
+
+ str = skip_arg(str);
+
+ t = strndup(p, str-p);
+ if (t == NULL)
+ goto fail;
+ *argvp++ = t;
+ }
+ }
+ *argvp = NULL;
+
+out:
+ return argv;
+
+fail:
+ argv_free(argv);
+ return NULL;
+}
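
The pairing of the two helpers above is the whole contract: argv_split()
hands back a heap-allocated, NULL-terminated vector that must eventually go
back through argv_free(). A minimal usage sketch, assuming the same
linux/string.h declarations this file includes (the print_args() wrapper and
the command string are illustrative, not part of the patch):

#include <stdio.h>

static int print_args(const char *cmdline)
{
	int argc, i;
	char **argv = argv_split(cmdline, &argc);

	if (argv == NULL)
		return -1;	/* allocation failure */

	for (i = 0; i < argc; i++)
		printf("argv[%d] = '%s'\n", i, argv[i]);

	argv_free(argv);	/* frees both the strings and the array */
	return 0;
}

Calling print_args("perf  record   -e cycles") would print four arguments;
runs of whitespace collapse into a single separator, as documented above.
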
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 38748b0e342f..38494782be06 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* From lib/bitmap.c
* Helper functions for bitmap.h.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/bitmap.h>
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index ee9d5362f35b..e3962cfbc9a6 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1,3 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
+ netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
+ btf_dump.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index f91639bf5650..c6f94cffe06e 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
# Most of this file is copied from tools/lib/traceevent/Makefile
-BPF_VERSION = 0
-BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 3
+LIBBPF_VERSION := $(shell \
+ grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \
+ sort -rV | head -n1 | cut -d'_' -f2)
+LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
MAKEFLAGS += --no-print-directory
@@ -79,15 +80,9 @@ export prefix libdir src obj
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-VERSION = $(BPF_VERSION)
-PATCHLEVEL = $(BPF_PATCHLEVEL)
-EXTRAVERSION = $(BPF_EXTRAVERSION)
-
OBJ = $@
N =
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
-
LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION)
LIB_FILE = libbpf.a libbpf.so*
PC_FILE = libbpf.pc
@@ -113,6 +108,7 @@ override CFLAGS += -Werror -Wall
override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
+override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ifeq ($(VERBOSE),1)
Q =
@@ -138,7 +134,9 @@ LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
- awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
+ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
+ sort -u | wc -l)
VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
@@ -178,10 +176,10 @@ $(BPF_IN): force elfdep bpfdep
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
- $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
- @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
+ @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -204,6 +202,17 @@ check_abi: $(OUTPUT)libbpf.so
"versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
"Please make sure all LIBBPF_API symbols are" \
"versioned in $(VERSION_SCRIPT)." >&2; \
+ readelf -s --wide $(OUTPUT)libbpf-in.o | \
+ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \
+ sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
+ readelf -s --wide $(OUTPUT)libbpf.so | \
+ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
+ sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
+ diff -u $(OUTPUT)libbpf_global_syms.tmp \
+ $(OUTPUT)libbpf_versioned_syms.tmp; \
+ rm $(OUTPUT)libbpf_global_syms.tmp \
+ $(OUTPUT)libbpf_versioned_syms.tmp; \
exit 1; \
fi
@@ -247,7 +256,8 @@ config-clean:
clean:
$(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
- *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd *.pc LIBBPF-CFLAGS
+ *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
+ *.pc LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index cef7b77eab69..8928f7787f2d 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -9,7 +9,8 @@ described here. It's recommended to follow these conventions whenever a
new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the
-following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``.
+following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``,
+``perf_buffer_``.
System call wrappers
--------------------
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c4a48086dc9a..cbb933532981 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -26,10 +26,11 @@
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
+#include <errno.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
-#include <errno.h>
+#include "libbpf_internal.h"
/*
* When building perf, unistd.h is overridden. __NR_bpf is
@@ -53,10 +54,6 @@
# endif
#endif
-#ifndef min
-#define min(x, y) ((x) < (y) ? (x) : (y))
-#endif
-
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
@@ -256,6 +253,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
if (load_attr->name)
memcpy(attr.prog_name, load_attr->name,
min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
+ attr.prog_flags = load_attr->prog_flags;
fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
@@ -570,7 +568,7 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
return ret;
}
-int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
+static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
union bpf_attr attr;
int err;
@@ -578,26 +576,26 @@ int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
- err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
+ err = sys_bpf(cmd, &attr, sizeof(attr));
if (!err)
*next_id = attr.next_id;
return err;
}
-int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
+int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
- union bpf_attr attr;
- int err;
-
- memset(&attr, 0, sizeof(attr));
- attr.start_id = start_id;
+ return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
+}
- err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
- if (!err)
- *next_id = attr.next_id;
+int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
+{
+ return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
+}
- return err;
+int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
+{
+ return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}
int bpf_prog_get_fd_by_id(__u32 id)
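
The three wrappers now share one implementation, and together with the
*_get_fd_by_id() calls below they form the standard kernel-object iteration
idiom. A sketch of walking every loaded BPF program (error handling trimmed;
close() comes from unistd.h):

	__u32 id = 0;
	int fd;

	while (bpf_prog_get_next_id(id, &id) == 0) {
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0)
			continue;	/* program may have been unloaded */
		/* ... inspect the program via its fd ... */
		close(fd);
	}

The same loop works for maps via bpf_map_get_next_id() and, with this patch,
for BTF objects via the new bpf_btf_get_next_id().
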
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 9593fec75652..0db01334740f 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -87,6 +87,7 @@ struct bpf_load_program_attr {
const void *line_info;
__u32 line_info_cnt;
__u32 log_level;
+ __u32 prog_flags;
};
/* Flags to direct loading requirements */
@@ -155,6 +156,7 @@ LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
__u32 *retval, __u32 *duration);
LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
+LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index 6978314ea7f6..8c67561c93b0 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -6,10 +6,7 @@
#include <linux/err.h>
#include <linux/bpf.h>
#include "libbpf.h"
-
-#ifndef min
-#define min(x, y) ((x) < (y) ? (x) : (y))
-#endif
+#include "libbpf_internal.h"
struct bpf_prog_linfo {
void *raw_linfo;
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 75eaf10b9e1a..1aa189a9112a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,31 +1,25 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
+#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
-#include "libbpf_util.h"
-
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#define min(a, b) ((a) < (b) ? (a) : (b))
+#include "libbpf_internal.h"
+#include "hashmap.h"
#define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff
-#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
- ((k) == BTF_KIND_VOLATILE) || \
- ((k) == BTF_KIND_CONST) || \
- ((k) == BTF_KIND_RESTRICT))
-
-#define IS_VAR(k) ((k) == BTF_KIND_VAR)
-
static struct btf_type btf_void;
struct btf {
@@ -42,47 +36,6 @@ struct btf {
int fd;
};
-struct btf_ext_info {
- /*
- * info points to the individual info section (e.g. func_info and
- * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
- */
- void *info;
- __u32 rec_size;
- __u32 len;
-};
-
-struct btf_ext {
- union {
- struct btf_ext_header *hdr;
- void *data;
- };
- struct btf_ext_info func_info;
- struct btf_ext_info line_info;
- __u32 data_size;
-};
-
-struct btf_ext_info_sec {
- __u32 sec_name_off;
- __u32 num_info;
- /* Followed by num_info * record_size number of bytes */
- __u8 data[0];
-};
-
-/* The minimum bpf_func_info checked by the loader */
-struct bpf_func_info_min {
- __u32 insn_off;
- __u32 type_id;
-};
-
-/* The minimum bpf_line_info checked by the loader */
-struct bpf_line_info_min {
- __u32 insn_off;
- __u32 file_name_off;
- __u32 line_off;
- __u32 line_col;
-};
-
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
@@ -192,9 +145,9 @@ static int btf_parse_str_sec(struct btf *btf)
static int btf_type_size(struct btf_type *t)
{
int base_size = sizeof(struct btf_type);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u16 vlen = btf_vlen(t);
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
@@ -219,7 +172,7 @@ static int btf_type_size(struct btf_type *t)
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
default:
- pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
+ pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
return -EINVAL;
}
}
@@ -263,7 +216,7 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
static bool btf_type_is_void(const struct btf_type *t)
{
- return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+ return t == &btf_void || btf_is_fwd(t);
}
static bool btf_type_is_void_or_null(const struct btf_type *t)
@@ -284,7 +237,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
i++) {
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
@@ -303,7 +256,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
type_id = t->type;
break;
case BTF_KIND_ARRAY:
- array = (const struct btf_array *)(t + 1);
+ array = btf_array(t);
if (nelems && array->nelems > UINT32_MAX / nelems)
return -E2BIG;
nelems *= array->nelems;
@@ -334,8 +287,7 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
while (depth < MAX_RESOLVE_DEPTH &&
!btf_type_is_void_or_null(t) &&
- (IS_MODIFIER(BTF_INFO_KIND(t->info)) ||
- IS_VAR(BTF_INFO_KIND(t->info)))) {
+ (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
type_id = t->type;
t = btf__type_by_id(btf, type_id);
depth++;
@@ -417,6 +369,132 @@ done:
return btf;
}
+static bool btf_check_endianness(const GElf_Ehdr *ehdr)
+{
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
+#else
+# error "Unrecognized __BYTE_ORDER"
+#endif
+}
+
+struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
+{
+ Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
+ int err = 0, fd = -1, idx = 0;
+ struct btf *btf = NULL;
+ Elf_Scn *scn = NULL;
+ Elf *elf = NULL;
+ GElf_Ehdr ehdr;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warning("failed to init libelf for %s\n", path);
+ return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+ }
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ err = -errno;
+ pr_warning("failed to open %s: %s\n", path, strerror(errno));
+ return ERR_PTR(err);
+ }
+
+ err = -LIBBPF_ERRNO__FORMAT;
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+ if (!elf) {
+ pr_warning("failed to open %s as ELF file\n", path);
+ goto done;
+ }
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warning("failed to get EHDR from %s\n", path);
+ goto done;
+ }
+ if (!btf_check_endianness(&ehdr)) {
+ pr_warning("non-native ELF endianness is not supported\n");
+ goto done;
+ }
+ if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
+ pr_warning("failed to get e_shstrndx from %s\n", path);
+ goto done;
+ }
+
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+ char *name;
+
+ idx++;
+ if (gelf_getshdr(scn, &sh) != &sh) {
+ pr_warning("failed to get section(%d) header from %s\n",
+ idx, path);
+ goto done;
+ }
+ name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
+ if (!name) {
+ pr_warning("failed to get section(%d) name from %s\n",
+ idx, path);
+ goto done;
+ }
+ if (strcmp(name, BTF_ELF_SEC) == 0) {
+ btf_data = elf_getdata(scn, 0);
+ if (!btf_data) {
+ pr_warning("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
+ goto done;
+ }
+ continue;
+ } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
+ btf_ext_data = elf_getdata(scn, 0);
+ if (!btf_ext_data) {
+ pr_warning("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
+ goto done;
+ }
+ continue;
+ }
+ }
+
+ err = 0;
+
+ if (!btf_data) {
+ err = -ENOENT;
+ goto done;
+ }
+ btf = btf__new(btf_data->d_buf, btf_data->d_size);
+ if (IS_ERR(btf))
+ goto done;
+
+ if (btf_ext && btf_ext_data) {
+ *btf_ext = btf_ext__new(btf_ext_data->d_buf,
+ btf_ext_data->d_size);
+ if (IS_ERR(*btf_ext))
+ goto done;
+ } else if (btf_ext) {
+ *btf_ext = NULL;
+ }
+done:
+ if (elf)
+ elf_end(elf);
+ close(fd);
+
+ if (err)
+ return ERR_PTR(err);
+ /*
+ * btf is always parsed before btf_ext, so no need to clean up
+	 * btf_ext if btf loading failed
+ */
+ if (IS_ERR(btf))
+ return btf;
+ if (btf_ext && IS_ERR(*btf_ext)) {
+ btf__free(btf);
+ err = PTR_ERR(*btf_ext);
+ return ERR_PTR(err);
+ }
+ return btf;
+}
+
static int compare_vsi_off(const void *_a, const void *_b)
{
const struct btf_var_secinfo *a = _a;
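
A sketch of the calling convention the new btf__parse_elf() API expects: the
return value is ERR_PTR-encoded, and the .BTF.ext out-parameter is optional
("prog.o" is an illustrative object file name):

	struct btf_ext *btf_ext;
	struct btf *btf;

	btf = btf__parse_elf("prog.o", &btf_ext);
	if (IS_ERR(btf))
		return PTR_ERR(btf);	/* e.g. -ENOENT: no .BTF section */

	/* btf_ext is NULL if the object had no .BTF.ext section */
	if (btf_ext)
		btf_ext__free(btf_ext);
	btf__free(btf);

Passing NULL as the second argument skips .BTF.ext parsing entirely, per the
btf_ext && strcmp(name, BTF_EXT_ELF_SEC) check in the section loop above.
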
@@ -428,11 +506,11 @@ static int compare_vsi_off(const void *_a, const void *_b)
static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
struct btf_type *t)
{
- __u32 size = 0, off = 0, i, vars = BTF_INFO_VLEN(t->info);
+ __u32 size = 0, off = 0, i, vars = btf_vlen(t);
const char *name = btf__name_by_offset(btf, t->name_off);
const struct btf_type *t_var;
struct btf_var_secinfo *vsi;
- struct btf_var *var;
+ const struct btf_var *var;
int ret;
if (!name) {
@@ -448,12 +526,11 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
t->size = size;
- for (i = 0, vsi = (struct btf_var_secinfo *)(t + 1);
- i < vars; i++, vsi++) {
+ for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
- var = (struct btf_var *)(t_var + 1);
+ var = btf_var(t_var);
- if (BTF_INFO_KIND(t_var->info) != BTF_KIND_VAR) {
+ if (!btf_is_var(t_var)) {
pr_debug("Non-VAR type seen in section %s\n", name);
return -EINVAL;
}
@@ -469,7 +546,8 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
ret = bpf_object__variable_offset(obj, name, &off);
if (ret) {
- pr_debug("No offset found in symbol table for VAR %s\n", name);
+ pr_debug("No offset found in symbol table for VAR %s\n",
+ name);
return -ENOENT;
}
@@ -493,7 +571,7 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
* is section size and global variable offset. We use
* the info from the ELF itself for this purpose.
*/
- if (BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC) {
+ if (btf_is_datasec(t)) {
err = btf_fixup_datasec(obj, btf, t);
if (err)
break;
@@ -648,14 +726,13 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
return -EINVAL;
}
- if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
- BTF_INFO_VLEN(container_type->info) < 2) {
+ if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
pr_warning("map:%s container_name:%s is an invalid container struct\n",
map_name, container_name);
return -EINVAL;
}
- key = (struct btf_member *)(container_type + 1);
+ key = btf_members(container_type);
value = key + 1;
key_size = btf__resolve_size(btf, key->type);
@@ -705,6 +782,9 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
/* The start of the info sec (including the __u32 record_size). */
void *info;
+ if (ext_sec->len == 0)
+ return 0;
+
if (ext_sec->off & 0x03) {
pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
ext_sec->desc);
@@ -808,11 +888,24 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
return btf_ext_setup_info(btf_ext, &param);
}
+static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
+{
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->offset_reloc_off,
+ .len = btf_ext->hdr->offset_reloc_len,
+ .min_rec_size = sizeof(struct bpf_offset_reloc),
+ .ext_info = &btf_ext->offset_reloc_info,
+ .desc = "offset_reloc",
+ };
+
+ return btf_ext_setup_info(btf_ext, &param);
+}
+
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
+ if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
data_size < hdr->hdr_len) {
pr_debug("BTF.ext header not found");
return -EINVAL;
@@ -870,6 +963,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
+ if (btf_ext->hdr->hdr_len <
+ offsetofend(struct btf_ext_header, line_info_len))
+ goto done;
err = btf_ext_setup_func_info(btf_ext);
if (err)
goto done;
@@ -878,6 +974,13 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
if (err)
goto done;
+ if (btf_ext->hdr->hdr_len <
+ offsetofend(struct btf_ext_header, offset_reloc_len))
+ goto done;
+ err = btf_ext_setup_offset_reloc(btf_ext);
+ if (err)
+ goto done;
+
done:
if (err) {
btf_ext__free(btf_ext);
@@ -1165,16 +1268,9 @@ done:
return err;
}
-#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
-#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)
-struct btf_dedup_node {
- struct btf_dedup_node *next;
- __u32 type_id;
-};
-
struct btf_dedup {
/* .BTF section to be deduped in-place */
struct btf *btf;
@@ -1190,7 +1286,7 @@ struct btf_dedup {
* candidates, which is fine because we rely on subsequent
* btf_xxx_equal() checks to authoritatively verify type equality.
*/
- struct btf_dedup_node **dedup_table;
+ struct hashmap *dedup_table;
/* Canonical types map */
__u32 *map;
/* Hypothetical mapping, used during type graph equivalence checks */
@@ -1215,30 +1311,18 @@ struct btf_str_ptrs {
__u32 cap;
};
-static inline __u32 hash_combine(__u32 h, __u32 value)
+static long hash_combine(long h, long value)
{
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
- return h * 37 + value * GOLDEN_RATIO_PRIME;
-#undef GOLDEN_RATIO_PRIME
+ return h * 31 + value;
}
-#define for_each_dedup_cand(d, hash, node) \
- for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
- node; \
- node = node->next)
+#define for_each_dedup_cand(d, node, hash) \
+ hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
-static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
+static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
- struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
- int bucket = hash & (d->opts.dedup_table_size - 1);
-
- if (!node)
- return -ENOMEM;
- node->type_id = type_id;
- node->next = d->dedup_table[bucket];
- d->dedup_table[bucket] = node;
- return 0;
+ return hashmap__append(d->dedup_table,
+ (void *)hash, (void *)(long)type_id);
}
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
@@ -1267,36 +1351,10 @@ static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
d->hypot_cnt = 0;
}
-static void btf_dedup_table_free(struct btf_dedup *d)
-{
- struct btf_dedup_node *head, *tmp;
- int i;
-
- if (!d->dedup_table)
- return;
-
- for (i = 0; i < d->opts.dedup_table_size; i++) {
- while (d->dedup_table[i]) {
- tmp = d->dedup_table[i];
- d->dedup_table[i] = tmp->next;
- free(tmp);
- }
-
- head = d->dedup_table[i];
- while (head) {
- tmp = head;
- head = head->next;
- free(tmp);
- }
- }
-
- free(d->dedup_table);
- d->dedup_table = NULL;
-}
-
static void btf_dedup_free(struct btf_dedup *d)
{
- btf_dedup_table_free(d);
+ hashmap__free(d->dedup_table);
+ d->dedup_table = NULL;
free(d->map);
d->map = NULL;
@@ -1310,40 +1368,43 @@ static void btf_dedup_free(struct btf_dedup *d)
free(d);
}
-/* Find closest power of two >= to size, capped at 2^max_size_log */
-static __u32 roundup_pow2_max(__u32 size, int max_size_log)
+static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
{
- int i;
+ return (size_t)key;
+}
- for (i = 0; i < max_size_log && (1U << i) < size; i++)
- ;
- return 1U << i;
+static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
+{
+ return 0;
}
+static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
+{
+ return k1 == k2;
+}
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
+ hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
int i, err = 0;
- __u32 sz;
if (!d)
return ERR_PTR(-ENOMEM);
d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
- sz = opts && opts->dedup_table_size ? opts->dedup_table_size
- : BTF_DEDUP_TABLE_DEFAULT_SIZE;
- sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
- d->opts.dedup_table_size = sz;
+ /* dedup_table_size is now used only to force collisions in tests */
+ if (opts && opts->dedup_table_size == 1)
+ hash_fn = btf_dedup_collision_hash_fn;
d->btf = btf;
d->btf_ext = btf_ext;
- d->dedup_table = calloc(d->opts.dedup_table_size,
- sizeof(struct btf_dedup_node *));
- if (!d->dedup_table) {
- err = -ENOMEM;
+ d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(d->dedup_table)) {
+ err = PTR_ERR(d->dedup_table);
+ d->dedup_table = NULL;
goto done;
}
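
The hashmap that replaces the open-coded dedup table follows one pattern
throughout this patch: caller-supplied hash/equality callbacks, an
ERR_PTR-encoded constructor, and multi-value keys via hashmap__append().
Reduced to its essentials (type_id and handle_type_id() are hypothetical
stand-ins for the surrounding dedup code):

	struct hashmap_entry *entry;
	struct hashmap *map;
	long h = 42;		/* stand-in for a precomputed type hash */
	int err;

	map = hashmap__new(btf_dedup_identity_hash_fn,
			   btf_dedup_equal_fn, NULL);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* keys may repeat: append keeps every entry for a key */
	err = hashmap__append(map, (void *)h, (void *)(long)type_id);
	if (err)
		return err;

	hashmap__for_each_key_entry(map, entry, (void *)h)
		handle_type_id((__u32)(long)entry->value);

	hashmap__free(map);

Because the hash itself is the key and btf_dedup_equal_fn() compares keys by
identity, all types whose signatures hash equally land in one bucket chain,
which is exactly what for_each_dedup_cand() iterates.
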
@@ -1356,10 +1417,9 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
d->map[0] = 0;
for (i = 1; i <= btf->nr_types; i++) {
struct btf_type *t = d->btf->types[i];
- __u16 kind = BTF_INFO_KIND(t->info);
/* VAR and DATASEC are never deduped and are self-canonical */
- if (kind == BTF_KIND_VAR || kind == BTF_KIND_DATASEC)
+ if (btf_is_var(t) || btf_is_datasec(t))
d->map[i] = i;
else
d->map[i] = BTF_UNPROCESSED_ID;
@@ -1400,11 +1460,11 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
if (r)
return r;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *m = (struct btf_member *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_member *m = btf_members(t);
+ __u16 vlen = btf_vlen(t);
for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
@@ -1415,8 +1475,8 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
break;
}
case BTF_KIND_ENUM: {
- struct btf_enum *m = (struct btf_enum *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_enum *m = btf_enum(t);
+ __u16 vlen = btf_vlen(t);
for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
@@ -1427,8 +1487,8 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
break;
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = (struct btf_param *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_param *m = btf_params(t);
+ __u16 vlen = btf_vlen(t);
for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
@@ -1662,9 +1722,9 @@ done:
return err;
}
-static __u32 btf_hash_common(struct btf_type *t)
+static long btf_hash_common(struct btf_type *t)
{
- __u32 h;
+ long h;
h = hash_combine(0, t->name_off);
h = hash_combine(h, t->info);
@@ -1680,10 +1740,10 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of INT. */
-static __u32 btf_hash_int(struct btf_type *t)
+static long btf_hash_int(struct btf_type *t)
{
__u32 info = *(__u32 *)(t + 1);
- __u32 h;
+ long h;
h = btf_hash_common(t);
h = hash_combine(h, info);
@@ -1703,9 +1763,9 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of ENUM. */
-static __u32 btf_hash_enum(struct btf_type *t)
+static long btf_hash_enum(struct btf_type *t)
{
- __u32 h;
+ long h;
/* don't hash vlen and enum members to support enum fwd resolving */
h = hash_combine(0, t->name_off);
@@ -1717,16 +1777,16 @@ static __u32 btf_hash_enum(struct btf_type *t)
/* Check structural equality of two ENUMs. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_enum *m1, *m2;
+ const struct btf_enum *m1, *m2;
__u16 vlen;
int i;
if (!btf_equal_common(t1, t2))
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_enum *)(t1 + 1);
- m2 = (struct btf_enum *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_enum(t1);
+ m2 = btf_enum(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off || m1->val != m2->val)
return false;
@@ -1738,8 +1798,7 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
- return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
- BTF_INFO_VLEN(t->info) == 0;
+ return btf_is_enum(t) && btf_vlen(t) == 0;
}
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
@@ -1757,11 +1816,11 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
* as referenced type IDs equivalence is established separately during type
* graph equivalence check algorithm.
*/
-static __u32 btf_hash_struct(struct btf_type *t)
+static long btf_hash_struct(struct btf_type *t)
{
- struct btf_member *member = (struct btf_member *)(t + 1);
- __u32 vlen = BTF_INFO_VLEN(t->info);
- __u32 h = btf_hash_common(t);
+ const struct btf_member *member = btf_members(t);
+ __u32 vlen = btf_vlen(t);
+ long h = btf_hash_common(t);
int i;
for (i = 0; i < vlen; i++) {
@@ -1780,16 +1839,16 @@ static __u32 btf_hash_struct(struct btf_type *t)
*/
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_member *m1, *m2;
+ const struct btf_member *m1, *m2;
__u16 vlen;
int i;
if (!btf_equal_common(t1, t2))
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_member *)(t1 + 1);
- m2 = (struct btf_member *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off || m1->offset != m2->offset)
return false;
@@ -1804,10 +1863,10 @@ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
* under assumption that they were already resolved to canonical type IDs and
* are not going to change.
*/
-static __u32 btf_hash_array(struct btf_type *t)
+static long btf_hash_array(struct btf_type *t)
{
- struct btf_array *info = (struct btf_array *)(t + 1);
- __u32 h = btf_hash_common(t);
+ const struct btf_array *info = btf_array(t);
+ long h = btf_hash_common(t);
h = hash_combine(h, info->type);
h = hash_combine(h, info->index_type);
@@ -1824,13 +1883,13 @@ static __u32 btf_hash_array(struct btf_type *t)
*/
static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_array *info1, *info2;
+ const struct btf_array *info1, *info2;
if (!btf_equal_common(t1, t2))
return false;
- info1 = (struct btf_array *)(t1 + 1);
- info2 = (struct btf_array *)(t2 + 1);
+ info1 = btf_array(t1);
+ info2 = btf_array(t2);
return info1->type == info2->type &&
info1->index_type == info2->index_type &&
info1->nelems == info2->nelems;
@@ -1843,14 +1902,10 @@ static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
*/
static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_array *info1, *info2;
-
if (!btf_equal_common(t1, t2))
return false;
- info1 = (struct btf_array *)(t1 + 1);
- info2 = (struct btf_array *)(t2 + 1);
- return info1->nelems == info2->nelems;
+ return btf_array(t1)->nelems == btf_array(t2)->nelems;
}
/*
@@ -1858,11 +1913,11 @@ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
* under assumption that they were already resolved to canonical type IDs and
* are not going to change.
*/
-static inline __u32 btf_hash_fnproto(struct btf_type *t)
+static long btf_hash_fnproto(struct btf_type *t)
{
- struct btf_param *member = (struct btf_param *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
- __u32 h = btf_hash_common(t);
+ const struct btf_param *member = btf_params(t);
+ __u16 vlen = btf_vlen(t);
+ long h = btf_hash_common(t);
int i;
for (i = 0; i < vlen; i++) {
@@ -1880,18 +1935,18 @@ static inline __u32 btf_hash_fnproto(struct btf_type *t)
* This function is called during reference types deduplication to compare
* FUNC_PROTO to potential canonical representative.
*/
-static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
+static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_param *m1, *m2;
+ const struct btf_param *m1, *m2;
__u16 vlen;
int i;
if (!btf_equal_common(t1, t2))
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_param *)(t1 + 1);
- m2 = (struct btf_param *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_params(t1);
+ m2 = btf_params(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off || m1->type != m2->type)
return false;
@@ -1906,9 +1961,9 @@ static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
* IDs. This check is performed during type graph equivalence check and
* referenced types equivalence is checked separately.
*/
-static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
+static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_param *m1, *m2;
+ const struct btf_param *m1, *m2;
__u16 vlen;
int i;
@@ -1916,9 +1971,9 @@ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
if (t1->name_off != t2->name_off || t1->info != t2->info)
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_param *)(t1 + 1);
- m2 = (struct btf_param *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_params(t1);
+ m2 = btf_params(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off)
return false;
@@ -1937,13 +1992,14 @@ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
struct btf_type *t = d->btf->types[type_id];
+ struct hashmap_entry *hash_entry;
struct btf_type *cand;
- struct btf_dedup_node *cand_node;
/* if we don't find equivalent type, then we are canonical */
__u32 new_id = type_id;
- __u32 h;
+ __u32 cand_id;
+ long h;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
@@ -1960,10 +2016,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_INT:
h = btf_hash_int(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_int(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -1971,10 +2028,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_ENUM:
h = btf_hash_enum(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_enum(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
if (d->opts.dont_resolve_fwds)
@@ -1982,21 +2040,22 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
if (btf_compat_enum(t, cand)) {
if (btf_is_enum_fwd(t)) {
/* resolve fwd to full enum */
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
/* resolve canonical enum fwd to full enum */
- d->map[cand_node->type_id] = type_id;
+ d->map[cand_id] = type_id;
}
}
break;
case BTF_KIND_FWD:
h = btf_hash_common(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_common(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2053,13 +2112,13 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
__u32 orig_type_id = type_id;
- if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ if (!btf_is_fwd(d->btf->types[type_id]))
return type_id;
while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
type_id = d->map[type_id];
- if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ if (!btf_is_fwd(d->btf->types[type_id]))
return type_id;
return orig_type_id;
@@ -2068,7 +2127,7 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
static inline __u16 btf_fwd_kind(struct btf_type *t)
{
- return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
+ return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
/*
@@ -2189,8 +2248,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
cand_type = d->btf->types[cand_id];
canon_type = d->btf->types[canon_id];
- cand_kind = BTF_INFO_KIND(cand_type->info);
- canon_kind = BTF_INFO_KIND(canon_type->info);
+ cand_kind = btf_kind(cand_type);
+ canon_kind = btf_kind(canon_type);
if (cand_type->name_off != canon_type->name_off)
return 0;
@@ -2239,12 +2298,12 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
case BTF_KIND_ARRAY: {
- struct btf_array *cand_arr, *canon_arr;
+ const struct btf_array *cand_arr, *canon_arr;
if (!btf_compat_array(cand_type, canon_type))
return 0;
- cand_arr = (struct btf_array *)(cand_type + 1);
- canon_arr = (struct btf_array *)(canon_type + 1);
+ cand_arr = btf_array(cand_type);
+ canon_arr = btf_array(canon_type);
eq = btf_dedup_is_equiv(d,
cand_arr->index_type, canon_arr->index_type);
if (eq <= 0)
@@ -2254,14 +2313,14 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *cand_m, *canon_m;
+ const struct btf_member *cand_m, *canon_m;
__u16 vlen;
if (!btf_shallow_equal_struct(cand_type, canon_type))
return 0;
- vlen = BTF_INFO_VLEN(cand_type->info);
- cand_m = (struct btf_member *)(cand_type + 1);
- canon_m = (struct btf_member *)(canon_type + 1);
+ vlen = btf_vlen(cand_type);
+ cand_m = btf_members(cand_type);
+ canon_m = btf_members(canon_type);
for (i = 0; i < vlen; i++) {
eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
if (eq <= 0)
@@ -2274,7 +2333,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *cand_p, *canon_p;
+ const struct btf_param *cand_p, *canon_p;
__u16 vlen;
if (!btf_compat_fnproto(cand_type, canon_type))
@@ -2282,9 +2341,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
if (eq <= 0)
return eq;
- vlen = BTF_INFO_VLEN(cand_type->info);
- cand_p = (struct btf_param *)(cand_type + 1);
- canon_p = (struct btf_param *)(canon_type + 1);
+ vlen = btf_vlen(cand_type);
+ cand_p = btf_params(cand_type);
+ canon_p = btf_params(canon_type);
for (i = 0; i < vlen; i++) {
eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
if (eq <= 0)
@@ -2339,8 +2398,8 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
targ_type_id = d->hypot_map[cand_type_id];
t_id = resolve_type_id(d, targ_type_id);
c_id = resolve_type_id(d, cand_type_id);
- t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
- c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
+ t_kind = btf_kind(d->btf->types[t_id]);
+ c_kind = btf_kind(d->btf->types[c_id]);
/*
* Resolve FWD into STRUCT/UNION.
* It's ok to resolve FWD into STRUCT/UNION that's not yet
@@ -2397,25 +2456,26 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
*/
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_dedup_node *cand_node;
struct btf_type *cand_type, *t;
+ struct hashmap_entry *hash_entry;
/* if we don't find equivalent type, then we are canonical */
__u32 new_id = type_id;
__u16 kind;
- __u32 h;
+ long h;
/* already deduped or is in process of deduping (loop detected) */
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return 0;
t = d->btf->types[type_id];
- kind = BTF_INFO_KIND(t->info);
+ kind = btf_kind(t);
if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
return 0;
h = btf_hash_struct(t);
- for_each_dedup_cand(d, h, cand_node) {
+ for_each_dedup_cand(d, hash_entry, h) {
+ __u32 cand_id = (__u32)(long)hash_entry->value;
int eq;
/*
@@ -2428,17 +2488,17 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
- cand_type = d->btf->types[cand_node->type_id];
+ cand_type = d->btf->types[cand_id];
if (!btf_shallow_equal_struct(t, cand_type))
continue;
btf_dedup_clear_hypot_map(d);
- eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
+ eq = btf_dedup_is_equiv(d, type_id, cand_id);
if (eq < 0)
return eq;
if (!eq)
continue;
- new_id = cand_node->type_id;
+ new_id = cand_id;
btf_dedup_merge_hypot_map(d);
break;
}
@@ -2488,12 +2548,12 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
*/
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_dedup_node *cand_node;
+ struct hashmap_entry *hash_entry;
+ __u32 new_id = type_id, cand_id;
struct btf_type *t, *cand;
/* if we don't find equivalent type, then we are representative type */
- __u32 new_id = type_id;
int ref_type_id;
- __u32 h;
+ long h;
if (d->map[type_id] == BTF_IN_PROGRESS_ID)
return -ELOOP;
@@ -2503,7 +2563,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
t = d->btf->types[type_id];
d->map[type_id] = BTF_IN_PROGRESS_ID;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
@@ -2516,17 +2576,18 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
t->type = ref_type_id;
h = btf_hash_common(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_common(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
break;
case BTF_KIND_ARRAY: {
- struct btf_array *info = (struct btf_array *)(t + 1);
+ struct btf_array *info = btf_array(t);
ref_type_id = btf_dedup_ref_type(d, info->type);
if (ref_type_id < 0)
@@ -2539,10 +2600,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
info->index_type = ref_type_id;
h = btf_hash_array(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_array(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2559,8 +2621,8 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
return ref_type_id;
t->type = ref_type_id;
- vlen = BTF_INFO_VLEN(t->info);
- param = (struct btf_param *)(t + 1);
+ vlen = btf_vlen(t);
+ param = btf_params(t);
for (i = 0; i < vlen; i++) {
ref_type_id = btf_dedup_ref_type(d, param->type);
if (ref_type_id < 0)
@@ -2570,10 +2632,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
}
h = btf_hash_fnproto(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_fnproto(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2600,7 +2663,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
if (err < 0)
return err;
}
- btf_dedup_table_free(d);
+ /* we won't need d->dedup_table anymore */
+ hashmap__free(d->dedup_table);
+ d->dedup_table = NULL;
return 0;
}
@@ -2697,7 +2762,7 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
struct btf_type *t = d->btf->types[type_id];
int i, r;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
break;
@@ -2717,7 +2782,7 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_ARRAY: {
- struct btf_array *arr_info = (struct btf_array *)(t + 1);
+ struct btf_array *arr_info = btf_array(t);
r = btf_dedup_remap_type_id(d, arr_info->type);
if (r < 0)
@@ -2732,8 +2797,8 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *member = (struct btf_member *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_member *member = btf_members(t);
+ __u16 vlen = btf_vlen(t);
for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, member->type);
@@ -2746,8 +2811,8 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *param = (struct btf_param *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_param *param = btf_params(t);
+ __u16 vlen = btf_vlen(t);
r = btf_dedup_remap_type_id(d, t->type);
if (r < 0)
@@ -2765,8 +2830,8 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
}
case BTF_KIND_DATASEC: {
- struct btf_var_secinfo *var = (struct btf_var_secinfo *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_var_secinfo *var = btf_var_secinfos(t);
+ __u16 vlen = btf_vlen(t);
for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, var->type);
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index c7b399e81fce..9cb44b4fbf60 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -4,6 +4,8 @@
#ifndef __LIBBPF_BTF_H
#define __LIBBPF_BTF_H
+#include <stdarg.h>
+#include <linux/btf.h>
#include <linux/types.h>
#ifdef __cplusplus
@@ -16,6 +18,7 @@ extern "C" {
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
+#define MAPS_ELF_SEC ".maps"
struct btf;
struct btf_ext;
@@ -55,10 +58,16 @@ struct btf_ext_header {
__u32 func_info_len;
__u32 line_info_off;
__u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+ __u32 offset_reloc_off;
+ __u32 offset_reloc_len;
};
LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API struct btf *btf__parse_elf(const char *path,
+ struct btf_ext **btf_ext);
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
@@ -100,6 +109,199 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts);
+struct btf_dump;
+
+struct btf_dump_opts {
+ void *ctx;
+};
+
+typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
+
+LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn);
+LIBBPF_API void btf_dump__free(struct btf_dump *d);
+
+LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
+
+/*
+ * A set of helpers for easier handling of BTF types
+ */
+static inline __u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline __u16 btf_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_struct(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_STRUCT;
+}
+
+static inline bool btf_is_union(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ __u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_fwd(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FWD;
+}
+
+static inline bool btf_is_typedef(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_is_volatile(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_VOLATILE;
+}
+
+static inline bool btf_is_const(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_CONST;
+}
+
+static inline bool btf_is_restrict(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_RESTRICT;
+}
+
+static inline bool btf_is_mod(const struct btf_type *t)
+{
+ __u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_VOLATILE ||
+ kind == BTF_KIND_CONST ||
+ kind == BTF_KIND_RESTRICT;
+}
+
+static inline bool btf_is_func(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_is_func_proto(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FUNC_PROTO;
+}
+
+static inline bool btf_is_var(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_VAR;
+}
+
+static inline bool btf_is_datasec(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_DATASEC;
+}
+
+static inline __u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(__u32 *)(t + 1));
+}
+
+static inline __u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(__u32 *)(t + 1));
+}
+
+static inline __u8 btf_int_bits(const struct btf_type *t)
+{
+ return BTF_INT_BITS(*(__u32 *)(t + 1));
+}
+
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+/* Get bit offset of a member with specified index. */
+static inline __u32 btf_member_bit_offset(const struct btf_type *t,
+ __u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+ bool kflag = btf_kflag(t);
+
+ return kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
+}
+
+/*
+ * Get bitfield size of a member, assuming t is BTF_KIND_STRUCT or
+ * BTF_KIND_UNION. If member is not a bitfield, zero is returned.
+ */
+static inline __u32 btf_member_bitfield_size(const struct btf_type *t,
+ __u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+ bool kflag = btf_kflag(t);
+
+ return kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
+static inline struct btf_var *btf_var(const struct btf_type *t)
+{
+ return (struct btf_var *)(t + 1);
+}
+
+static inline struct btf_var_secinfo *
+btf_var_secinfos(const struct btf_type *t)
+{
+ return (struct btf_var_secinfo *)(t + 1);
+}
+
#ifdef __cplusplus
} /* extern "C" */
#endif
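
Everything added to this header composes into a short, workable pattern: use
the kind helpers to select types and the dumper to render them. A sketch
that prints the C definition of every struct in a BTF object
(dump_all_structs() and its vprintf-style callback are illustrative names;
the libbpf calls are the ones declared above):

#include <stdio.h>

static void print_to_stdout(void *ctx, const char *fmt, va_list args)
{
	vfprintf(stdout, fmt, args);
}

static int dump_all_structs(const struct btf *btf)
{
	__u32 id, n = btf__get_nr_types(btf);
	struct btf_dump *d;
	int err = 0;

	d = btf_dump__new(btf, NULL, NULL, print_to_stdout);
	if (IS_ERR(d))
		return PTR_ERR(d);

	for (id = 1; id <= n; id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		if (!btf_is_struct(t))
			continue;
		/* btf_vlen(t)/btf_members(t) would give member access here */
		err = btf_dump__dump_type(d, id);
		if (err)
			break;
	}

	btf_dump__free(d);
	return err;
}

Dependent types are emitted exactly once across the whole loop, so repeated
btf_dump__dump_type() calls never duplicate definitions.
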
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
new file mode 100644
index 000000000000..715967762312
--- /dev/null
+++ b/tools/lib/bpf/btf_dump.c
@@ -0,0 +1,1287 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * BTF-to-C type converter.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/btf.h>
+#include "btf.h"
+#include "hashmap.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
+static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
+
+static const char *pfx(int lvl)
+{
+ return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl];
+}
+
+enum btf_dump_type_order_state {
+ NOT_ORDERED,
+ ORDERING,
+ ORDERED,
+};
+
+enum btf_dump_type_emit_state {
+ NOT_EMITTED,
+ EMITTING,
+ EMITTED,
+};
+
+/* per-type auxiliary state */
+struct btf_dump_type_aux_state {
+ /* topological sorting state */
+ enum btf_dump_type_order_state order_state: 2;
+ /* emitting state used to determine the need for forward declaration */
+ enum btf_dump_type_emit_state emit_state: 2;
+ /* whether forward declaration was already emitted */
+ __u8 fwd_emitted: 1;
+ /* whether unique non-duplicate name was already assigned */
+ __u8 name_resolved: 1;
+};
+
+struct btf_dump {
+ const struct btf *btf;
+ const struct btf_ext *btf_ext;
+ btf_dump_printf_fn_t printf_fn;
+ struct btf_dump_opts opts;
+
+ /* per-type auxiliary state */
+ struct btf_dump_type_aux_state *type_states;
+	/* per-type optional cached unique name, must be freed if present */
+ const char **cached_names;
+
+ /* topo-sorted list of dependent type definitions */
+ __u32 *emit_queue;
+ int emit_queue_cap;
+ int emit_queue_cnt;
+
+ /*
+ * stack of type declarations (e.g., chain of modifiers, arrays,
+ * funcs, etc)
+ */
+ __u32 *decl_stack;
+ int decl_stack_cap;
+ int decl_stack_cnt;
+
+ /* maps struct/union/enum name to a number of name occurrences */
+ struct hashmap *type_names;
+ /*
+ * maps typedef identifiers and enum value names to a number of such
+ * name occurrences
+ */
+ struct hashmap *ident_names;
+};
+
+static size_t str_hash_fn(const void *key, void *ctx)
+{
+ const char *s = key;
+ size_t h = 0;
+
+ while (*s) {
+ h = h * 31 + *s;
+ s++;
+ }
+ return h;
+}
+
+static bool str_equal_fn(const void *a, const void *b, void *ctx)
+{
+ return strcmp(a, b) == 0;
+}
+
+static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
+{
+ return btf__name_by_offset(d->btf, name_off);
+}
+
+static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ d->printf_fn(d->opts.ctx, fmt, args);
+ va_end(args);
+}
+
+struct btf_dump *btf_dump__new(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn)
+{
+ struct btf_dump *d;
+ int err;
+
+ d = calloc(1, sizeof(struct btf_dump));
+ if (!d)
+ return ERR_PTR(-ENOMEM);
+
+ d->btf = btf;
+ d->btf_ext = btf_ext;
+ d->printf_fn = printf_fn;
+ d->opts.ctx = opts ? opts->ctx : NULL;
+
+ d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+ if (IS_ERR(d->type_names)) {
+ err = PTR_ERR(d->type_names);
+ d->type_names = NULL;
+ btf_dump__free(d);
+ return ERR_PTR(err);
+ }
+ d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+ if (IS_ERR(d->ident_names)) {
+ err = PTR_ERR(d->ident_names);
+ d->ident_names = NULL;
+ btf_dump__free(d);
+ return ERR_PTR(err);
+ }
+
+ return d;
+}
+
+void btf_dump__free(struct btf_dump *d)
+{
+ int i, cnt;
+
+ if (!d)
+ return;
+
+ free(d->type_states);
+ if (d->cached_names) {
+ /* any set cached name is owned by us and should be freed */
+ for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
+ if (d->cached_names[i])
+ free((void *)d->cached_names[i]);
+ }
+ }
+ free(d->cached_names);
+ free(d->emit_queue);
+ free(d->decl_stack);
+ hashmap__free(d->type_names);
+ hashmap__free(d->ident_names);
+
+ free(d);
+}
+
+static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
+static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
+
+/*
+ * Dump BTF type in a compilable C syntax, including all the necessary
+ * dependent types. If some of the dependent types were already emitted as
+ * part of a previous btf_dump__dump_type() invocation for another type,
+ * they won't be emitted again. This API allows callers to filter out BTF
+ * types according to user-defined criteria and emit only the minimal
+ * subset of types necessary to compile everything. Full struct/union
+ * definitions will still be emitted, even if the only usage is through a
+ * pointer and could be satisfied with just a forward declaration.
+ *
+ * Dumping is done in two high-level passes:
+ * 1. Topologically sort type definitions to satisfy C rules of compilation.
+ * 2. Emit type definitions in C syntax.
+ *
+ * Returns 0 on success; <0, otherwise.
+ */
+int btf_dump__dump_type(struct btf_dump *d, __u32 id)
+{
+ int err, i;
+
+ if (id > btf__get_nr_types(d->btf))
+ return -EINVAL;
+
+ /* type states are lazily allocated, as they might not be needed */
+ if (!d->type_states) {
+ d->type_states = calloc(1 + btf__get_nr_types(d->btf),
+ sizeof(d->type_states[0]));
+ if (!d->type_states)
+ return -ENOMEM;
+ d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
+ sizeof(d->cached_names[0]));
+ if (!d->cached_names)
+ return -ENOMEM;
+
+ /* VOID is special */
+ d->type_states[0].order_state = ORDERED;
+ d->type_states[0].emit_state = EMITTED;
+ }
+
+ d->emit_queue_cnt = 0;
+ err = btf_dump_order_type(d, id, false);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < d->emit_queue_cnt; i++)
+ btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
+
+ return 0;
+}
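+
+/*
+ * Usage sketch (illustrative only; the callback name and the iteration
+ * below are the caller's, not part of this file):
+ *
+ *	static void print_cb(void *ctx, const char *fmt, va_list args)
+ *	{
+ *		vfprintf(stdout, fmt, args);
+ *	}
+ *
+ *	struct btf_dump *d = btf_dump__new(btf, NULL, NULL, print_cb);
+ *
+ *	if (IS_ERR(d))
+ *		return PTR_ERR(d);
+ *	for (i = 1; i <= btf__get_nr_types(btf); i++)
+ *		btf_dump__dump_type(d, i);
+ *	btf_dump__free(d);
+ */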
+
+static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
+{
+ __u32 *new_queue;
+ size_t new_cap;
+
+ if (d->emit_queue_cnt >= d->emit_queue_cap) {
+ new_cap = max(16, d->emit_queue_cap * 3 / 2);
+ new_queue = realloc(d->emit_queue,
+ new_cap * sizeof(new_queue[0]));
+ if (!new_queue)
+ return -ENOMEM;
+ d->emit_queue = new_queue;
+ d->emit_queue_cap = new_cap;
+ }
+
+ d->emit_queue[d->emit_queue_cnt++] = id;
+ return 0;
+}
+
+/*
+ * Determine order of emitting dependent types and specified type to satisfy
+ * C compilation rules. This is done through topological sorting with an
+ * additional complication which comes from C rules. The main idea for C is
+ * that if some type is "embedded" into a struct/union, its size needs to be
+ * known at the time of definition of containing type. E.g., for:
+ *
+ * struct A {};
+ * struct B { struct A x; }
+ *
+ * struct A *HAS* to be defined before struct B, because it's "embedded",
+ * i.e., it is part of struct B layout. But in the following case:
+ *
+ * struct A;
+ * struct B { struct A *x; }
+ * struct A {};
+ *
+ * it's enough to just have a forward declaration of struct A at the time of
+ * struct B definition, as struct B has a pointer to struct A, so the size of
+ * field x is known without knowing struct A size: it's sizeof(void *).
+ *
+ * Unfortunately, there are some trickier cases we need to handle, e.g.:
+ *
+ * struct A {}; // if this was forward-declaration: compilation error
+ * struct B {
+ * struct { // anonymous struct
+ * struct A y;
+ * } *x;
+ * };
+ *
+ * In this case, struct B's field x is a pointer, so its size is known
+ * regardless of the size of the (anonymous) struct it points to. But because
+ * this struct is anonymous, and thus defined inline inside struct B, *and*
+ * it embeds struct A, the compiler requires the full definition of struct A
+ * to be known before struct B can be defined. This creates a transitive
+ * dependency between struct A and struct B. If struct A was forward-declared
+ * before struct B's definition and fully defined after it, that would
+ * trigger a compilation error.
+ *
+ * All this means that while we are doing topological sorting on BTF type
+ * graph, we need to determine relationships between different types (graph
+ * nodes):
+ * - weak link (relationship) between X and Y, if Y *CAN* be
+ * forward-declared at the point of X definition;
+ * - strong link, if Y *HAS* to be fully-defined before X can be defined.
+ *
+ * The rule is as follows. Given a chain of BTF types from X to Y, if there
+ * is a BTF_KIND_PTR type in the chain and at least one non-anonymous type
+ * Z (excluding X, including Y), then the link is weak. Otherwise, it's
+ * strong. Weak/strong relationship is determined recursively during DFS
+ * traversal and is returned as the result of btf_dump_order_type().
+ *
+ * btf_dump_order_type() tries to avoid unnecessary forward declarations,
+ * but it does not guarantee that no extraneous forward declarations will be
+ * emitted.
+ *
+ * To avoid extra work, the algorithm marks some BTF types as ORDERED when
+ * it's done with them, but not all of them (e.g., VOLATILE, CONST, RESTRICT,
+ * ARRAY, FUNC_PROTO), as weak/strong semantics for those depend on the
+ * entire graph path, so the same BTF type might cause weak or strong
+ * ordering depending on where one arrived at it from. For types like
+ * STRUCT/UNION/INT/ENUM, once they are processed, there is no need to do it
+ * again, so they are marked as ORDERED. We can mark PTR as ORDERED as well,
+ * as it semi-forces a weak link, unless the subsequently referenced
+ * STRUCT/UNION/ENUM is anonymous. But in any case, once those are
+ * processed, there is no need to do it again, as the result won't change.
+ *
+ * Returns:
+ * - 1, if type is part of strong link (so there is strong topological
+ * ordering requirements);
+ * - 0, if type is part of weak link (so can be satisfied through forward
+ * declaration);
+ * - <0, on error (e.g., unsatisfiable type loop detected).
+ */
+static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
+{
+ /*
+ * Order state is used to detect strong link cycles, but only for BTF
+ * kinds that are or could be an independent definition (i.e.,
+ * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
+ * func_protos, modifiers are just means to get to these definitions.
+ * Int/void don't need definitions, they are assumed to be always
+ * properly defined. We also ignore datasec, var, and funcs for now.
+ * So for all non-defining kinds, we never even set ordering state;
+ * for defining kinds we set ORDERING and subsequently ORDERED if
+ * they form a strong link.
+ */
+ struct btf_dump_type_aux_state *tstate = &d->type_states[id];
+ const struct btf_type *t;
+ __u16 vlen;
+ int err, i;
+
+ /* return 1, letting typedefs know that it's ok to be emitted */
+ if (tstate->order_state == ORDERED)
+ return 1;
+
+ t = btf__type_by_id(d->btf, id);
+
+ if (tstate->order_state == ORDERING) {
+ /* type loop, but resolvable through fwd declaration */
+ if (btf_is_composite(t) && through_ptr && t->name_off != 0)
+ return 0;
+ pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
+ return -ELOOP;
+ }
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_INT:
+ tstate->order_state = ORDERED;
+ return 0;
+
+ case BTF_KIND_PTR:
+ err = btf_dump_order_type(d, t->type, true);
+ tstate->order_state = ORDERED;
+ return err;
+
+ case BTF_KIND_ARRAY:
+ return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = btf_members(t);
+ /*
+ * struct/union is part of a strong link only if it's embedded
+ * (so no ptr in the path) or it's anonymous (and so has to be
+ * defined inline, even if declared through a ptr)
+ */
+ if (through_ptr && t->name_off != 0)
+ return 0;
+
+ tstate->order_state = ORDERING;
+
+ vlen = btf_vlen(t);
+ for (i = 0; i < vlen; i++, m++) {
+ err = btf_dump_order_type(d, m->type, false);
+ if (err < 0)
+ return err;
+ }
+
+ if (t->name_off != 0) {
+ err = btf_dump_add_emit_queue_id(d, id);
+ if (err < 0)
+ return err;
+ }
+
+ tstate->order_state = ORDERED;
+ return 1;
+ }
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ if (t->name_off != 0) {
+ err = btf_dump_add_emit_queue_id(d, id);
+ if (err)
+ return err;
+ }
+ tstate->order_state = ORDERED;
+ return 1;
+
+ case BTF_KIND_TYPEDEF: {
+ int is_strong;
+
+ is_strong = btf_dump_order_type(d, t->type, through_ptr);
+ if (is_strong < 0)
+ return is_strong;
+
+ /* typedef is similar to struct/union w.r.t. fwd-decls */
+ if (through_ptr && !is_strong)
+ return 0;
+
+ /* typedef is always a named definition */
+ err = btf_dump_add_emit_queue_id(d, id);
+ if (err)
+ return err;
+
+ d->type_states[id].order_state = ORDERED;
+ return 1;
+ }
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_dump_order_type(d, t->type, through_ptr);
+
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = btf_params(t);
+ bool is_strong;
+
+ err = btf_dump_order_type(d, t->type, through_ptr);
+ if (err < 0)
+ return err;
+ is_strong = err > 0;
+
+ vlen = btf_vlen(t);
+ for (i = 0; i < vlen; i++, p++) {
+ err = btf_dump_order_type(d, p->type, through_ptr);
+ if (err < 0)
+ return err;
+ if (err > 0)
+ is_strong = true;
+ }
+ return is_strong;
+ }
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DATASEC:
+ d->type_states[id].order_state = ORDERED;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl);
+
+static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl);
+
+static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+
+static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl);
+
+/* a local view into a shared stack */
+struct id_stack {
+ const __u32 *ids;
+ int cnt;
+};
+
+static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
+ const char *fname, int lvl);
+static void btf_dump_emit_type_chain(struct btf_dump *d,
+ struct id_stack *decl_stack,
+ const char *fname, int lvl);
+
+static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
+static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
+static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+ const char *orig_name);
+
+static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, id);
+
+ /* __builtin_va_list is a compiler built-in, which causes compilation
+ * errors when compiling with a different compiler than the one used to
+ * compile the original code (e.g., GCC to compile the kernel, Clang to
+ * consume the generated C header from BTF). As it is a built-in, it
+ * should already be defined properly inside the compiler.
+ */
+ if (t->name_off == 0)
+ return false;
+ return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
+}
+
+/*
+ * Emit C-syntax definitions of types from chains of BTF types.
+ *
+ * Determining which forward declarations are necessary is handled by
+ * btf_dump_emit_type() itself, but all the nitty-gritty details of emitting
+ * type declarations/definitions in C syntax are handled by a combo of
+ * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
+ * corresponding btf_dump_emit_*_{def,fwd}() functions.
+ *
+ * We also keep track of "containing struct/union type ID" to determine when
+ * we reference it from inside and thus can avoid emitting unnecessary forward
+ * declaration.
+ *
+ * The algorithm is designed in such a way that even if some error occurs
+ * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
+ * that doesn't fully comply with C rules), it will try to proceed and
+ * produce as much meaningful output as possible.
+ */
+static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
+{
+ struct btf_dump_type_aux_state *tstate = &d->type_states[id];
+ bool top_level_def = cont_id == 0;
+ const struct btf_type *t;
+ __u16 kind;
+
+ if (tstate->emit_state == EMITTED)
+ return;
+
+ t = btf__type_by_id(d->btf, id);
+ kind = btf_kind(t);
+
+ if (top_level_def && t->name_off == 0) {
+ pr_warning("unexpected nameless definition, id:[%u]\n", id);
+ return;
+ }
+
+ if (tstate->emit_state == EMITTING) {
+ if (tstate->fwd_emitted)
+ return;
+
+ switch (kind) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /*
+ * if we are referencing a struct/union that we are
+ * part of, then there is no need for a fwd declaration
+ */
+ if (id == cont_id)
+ return;
+ if (t->name_off == 0) {
+ pr_warning("anonymous struct/union loop, id:[%u]\n",
+ id);
+ return;
+ }
+ btf_dump_emit_struct_fwd(d, id, t);
+ btf_dump_printf(d, ";\n\n");
+ tstate->fwd_emitted = 1;
+ break;
+ case BTF_KIND_TYPEDEF:
+ /*
+ * for typedef, fwd_emitted means the typedef definition
+ * was emitted, but it can be used only for "weak"
+ * references through a pointer, not for embedding
+ */
+ if (!btf_dump_is_blacklisted(d, id)) {
+ btf_dump_emit_typedef_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ }
+ tstate->fwd_emitted = 1;
+ break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_ENUM:
+ if (top_level_def) {
+ btf_dump_emit_enum_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ }
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ btf_dump_emit_type(d, t->type, cont_id);
+ break;
+ case BTF_KIND_ARRAY:
+ btf_dump_emit_type(d, btf_array(t)->type, cont_id);
+ break;
+ case BTF_KIND_FWD:
+ btf_dump_emit_fwd_def(d, id, t);
+ btf_dump_printf(d, ";\n\n");
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_TYPEDEF:
+ tstate->emit_state = EMITTING;
+ btf_dump_emit_type(d, t->type, id);
+ /*
+ * typedef can serve as both a definition and a forward
+ * declaration; at this stage someone depends on the
+ * typedef as a forward declaration (refers to it
+ * through a pointer), so unless we already did it,
+ * emit the typedef as a forward declaration
+ */
+ if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
+ btf_dump_emit_typedef_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ }
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ tstate->emit_state = EMITTING;
+ /* if it's a top-level struct/union definition or struct/union
+ * is anonymous, then in C we'll be emitting all fields and
+ * their types (as opposed to just `struct X`), so we need to
+ * make sure that all types, referenced from struct/union
+ * members have necessary forward-declarations, where
+ * applicable
+ */
+ if (top_level_def || t->name_off == 0) {
+ const struct btf_member *m = btf_members(t);
+ __u16 vlen = btf_vlen(t);
+ int i, new_cont_id;
+
+ new_cont_id = t->name_off == 0 ? cont_id : id;
+ for (i = 0; i < vlen; i++, m++)
+ btf_dump_emit_type(d, m->type, new_cont_id);
+ } else if (!tstate->fwd_emitted && id != cont_id) {
+ btf_dump_emit_struct_fwd(d, id, t);
+ btf_dump_printf(d, ";\n\n");
+ tstate->fwd_emitted = 1;
+ }
+
+ if (top_level_def) {
+ btf_dump_emit_struct_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ tstate->emit_state = EMITTED;
+ } else {
+ tstate->emit_state = NOT_EMITTED;
+ }
+ break;
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = btf_params(t);
+ __u16 vlen = btf_vlen(t);
+ int i;
+
+ btf_dump_emit_type(d, t->type, cont_id);
+ for (i = 0; i < vlen; i++, p++)
+ btf_dump_emit_type(d, p->type, cont_id);
+
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int btf_align_of(const struct btf *btf, __u32 id)
+{
+ const struct btf_type *t = btf__type_by_id(btf, id);
+ __u16 kind = btf_kind(t);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ return min(sizeof(void *), (size_t)t->size);
+ case BTF_KIND_PTR:
+ return sizeof(void *);
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_align_of(btf, t->type);
+ case BTF_KIND_ARRAY:
+ return btf_align_of(btf, btf_array(t)->type);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = btf_members(t);
+ __u16 vlen = btf_vlen(t);
+ int i, align = 1;
+
+ for (i = 0; i < vlen; i++, m++)
+ align = max(align, btf_align_of(btf, m->type));
+
+ return align;
+ }
+ default:
+ pr_warning("unsupported BTF_KIND:%u\n", btf_kind(t));
+ return 1;
+ }
+}
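+
+/*
+ * For example: for struct { char c; long l; } on a 64-bit target,
+ * btf_align_of() returns max(align(char), align(long)) = 8, matching
+ * what a compiler would use for natural alignment.
+ */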
+
+static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ const struct btf_type *t)
+{
+ const struct btf_member *m;
+ int align, i, bit_sz;
+ __u16 vlen;
+
+ align = btf_align_of(btf, id);
+ /* size of a non-packed struct has to be a multiple of its alignment */
+ if (t->size % align)
+ return true;
+
+ m = btf_members(t);
+ vlen = btf_vlen(t);
+ /* all non-bitfield fields have to be naturally aligned */
+ for (i = 0; i < vlen; i++, m++) {
+ align = btf_align_of(btf, m->type);
+ bit_sz = btf_member_bitfield_size(t, i);
+ if (bit_sz == 0 && m->offset % (8 * align) != 0)
+ return true;
+ }
+
+ /*
+ * if original struct was marked as packed, but its layout is
+ * naturally aligned, we'll detect that it's not packed
+ */
+ return false;
+}
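+
+/*
+ * E.g., struct { char c; int x; } __attribute__((packed)) has size 5,
+ * which is not a multiple of its natural alignment 4, so it is detected
+ * as packed; without the attribute its size would be 8 with every member
+ * naturally aligned, so it would not be.
+ */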
+
+static int chip_away_bits(int total, int at_most)
+{
+ return total % at_most ? : at_most;
+}
+
+static void btf_dump_emit_bit_padding(const struct btf_dump *d,
+ int cur_off, int m_off, int m_bit_sz,
+ int align, int lvl)
+{
+ int off_diff = m_off - cur_off;
+ int ptr_bits = sizeof(void *) * 8;
+
+ if (off_diff <= 0)
+ /* no gap */
+ return;
+ if (m_bit_sz == 0 && off_diff < align * 8)
+ /* natural padding will take care of a gap */
+ return;
+
+ while (off_diff > 0) {
+ const char *pad_type;
+ int pad_bits;
+
+ if (ptr_bits > 32 && off_diff > 32) {
+ pad_type = "long";
+ pad_bits = chip_away_bits(off_diff, ptr_bits);
+ } else if (off_diff > 16) {
+ pad_type = "int";
+ pad_bits = chip_away_bits(off_diff, 32);
+ } else if (off_diff > 8) {
+ pad_type = "short";
+ pad_bits = chip_away_bits(off_diff, 16);
+ } else {
+ pad_type = "char";
+ pad_bits = chip_away_bits(off_diff, 8);
+ }
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
+ off_diff -= pad_bits;
+ }
+}
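+
+/*
+ * Example: a 72-bit gap on a 64-bit target is chipped away as "long: 8;"
+ * (72 % 64) followed by "long: 64;", so each emitted padding field ends
+ * on a natural boundary of its padding type.
+ */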
+
+static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ btf_dump_printf(d, "%s %s",
+ btf_is_struct(t) ? "struct" : "union",
+ btf_dump_type_name(d, id));
+}
+
+static void btf_dump_emit_struct_def(struct btf_dump *d,
+ __u32 id,
+ const struct btf_type *t,
+ int lvl)
+{
+ const struct btf_member *m = btf_members(t);
+ bool is_struct = btf_is_struct(t);
+ int align, i, packed, off = 0;
+ __u16 vlen = btf_vlen(t);
+
+ packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
+ align = packed ? 1 : btf_align_of(d->btf, id);
+
+ btf_dump_printf(d, "%s%s%s {",
+ is_struct ? "struct" : "union",
+ t->name_off ? " " : "",
+ btf_dump_type_name(d, id));
+
+ for (i = 0; i < vlen; i++, m++) {
+ const char *fname;
+ int m_off, m_sz;
+
+ fname = btf_name_of(d, m->name_off);
+ m_sz = btf_member_bitfield_size(t, i);
+ m_off = btf_member_bit_offset(t, i);
+ align = packed ? 1 : btf_align_of(d->btf, m->type);
+
+ btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
+ btf_dump_printf(d, "\n%s", pfx(lvl + 1));
+ btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
+
+ if (m_sz) {
+ btf_dump_printf(d, ": %d", m_sz);
+ off = m_off + m_sz;
+ } else {
+ m_sz = max(0, btf__resolve_size(d->btf, m->type));
+ off = m_off + m_sz * 8;
+ }
+ btf_dump_printf(d, ";");
+ }
+
+ if (vlen)
+ btf_dump_printf(d, "\n");
+ btf_dump_printf(d, "%s}", pfx(lvl));
+ if (packed)
+ btf_dump_printf(d, " __attribute__((packed))");
+}
+
+static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
+}
+
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t,
+ int lvl)
+{
+ const struct btf_enum *v = btf_enum(t);
+ __u16 vlen = btf_vlen(t);
+ const char *name;
+ size_t dup_cnt;
+ int i;
+
+ btf_dump_printf(d, "enum%s%s",
+ t->name_off ? " " : "",
+ btf_dump_type_name(d, id));
+
+ if (vlen) {
+ btf_dump_printf(d, " {");
+ for (i = 0; i < vlen; i++, v++) {
+ name = btf_name_of(d, v->name_off);
+ /* enumerators share namespace with typedef idents */
+ dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
+ if (dup_cnt > 1) {
+ btf_dump_printf(d, "\n%s%s___%zu = %d,",
+ pfx(lvl + 1), name, dup_cnt,
+ (__s32)v->val);
+ } else {
+ btf_dump_printf(d, "\n%s%s = %d,",
+ pfx(lvl + 1), name,
+ (__s32)v->val);
+ }
+ }
+ btf_dump_printf(d, "\n%s}", pfx(lvl));
+ }
+}
+
+static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ const char *name = btf_dump_type_name(d, id);
+
+ if (btf_kflag(t))
+ btf_dump_printf(d, "union %s", name);
+ else
+ btf_dump_printf(d, "struct %s", name);
+}
+
+static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl)
+{
+ const char *name = btf_dump_ident_name(d, id);
+
+ btf_dump_printf(d, "typedef ");
+ btf_dump_emit_type_decl(d, t->type, name, lvl);
+}
+
+static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
+{
+ __u32 *new_stack;
+ size_t new_cap;
+
+ if (d->decl_stack_cnt >= d->decl_stack_cap) {
+ new_cap = max(16, d->decl_stack_cap * 3 / 2);
+ new_stack = realloc(d->decl_stack,
+ new_cap * sizeof(new_stack[0]));
+ if (!new_stack)
+ return -ENOMEM;
+ d->decl_stack = new_stack;
+ d->decl_stack_cap = new_cap;
+ }
+
+ d->decl_stack[d->decl_stack_cnt++] = id;
+
+ return 0;
+}
+
+/*
+ * Emit type declaration (e.g., field type declaration in a struct or argument
+ * declaration in function prototype) in correct C syntax.
+ *
+ * For most types it's trivial, but there are few quirky type declaration
+ * cases worth mentioning:
+ * - function prototypes (especially nesting of function prototypes);
+ * - arrays;
+ * - const/volatile/restrict for pointers vs other types.
+ *
+ * For a good discussion of *PARSING* C syntax (as a human), see
+ * Peter van der Linden's "Expert C Programming: Deep C Secrets",
+ * Ch.3 "Unscrambling Declarations in C".
+ *
+ * It won't help much with BTF-to-C conversion, though, as that is the
+ * opposite problem. So we came up with this algorithm as the reverse of
+ * van der Linden's parsing algorithm. It goes from a structured BTF
+ * representation of a type declaration to valid compilable C syntax.
+ *
+ * For instance, consider this C typedef:
+ * typedef const int * const * arr_t[10];
+ * It will be represented in BTF with this chain of BTF types:
+ * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
+ *
+ * Notice how [const] modifier always goes before type it modifies in BTF type
+ * graph, but in C syntax, const/volatile/restrict modifiers are written to
+ * the right of pointers, but to the left of other types. There are also other
+ * quirks, like function pointers, arrays of them, functions returning other
+ * functions, etc.
+ *
+ * We handle that by pushing all the types to a stack, until we hit a
+ * "terminal" type (int/enum/struct/union/fwd). Then, depending on the kind
+ * of type on top of the stack, modifiers are handled differently.
+ * Arrays and function pointers also have wildly different syntax, as does
+ * their nesting. See the code for the authoritative definition.
+ *
+ * To avoid allocating new stack for each independent chain of BTF types, we
+ * share one bigger stack, with each chain working only on its own local view
+ * of a stack frame. Some care is required to "pop" stack frames after
+ * processing type declaration chain.
+ */
+static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
+ const char *fname, int lvl)
+{
+ struct id_stack decl_stack;
+ const struct btf_type *t;
+ int err, stack_start;
+
+ stack_start = d->decl_stack_cnt;
+ for (;;) {
+ err = btf_dump_push_decl_stack_id(d, id);
+ if (err < 0) {
+ /*
+ * if we don't have enough memory for entire type decl
+ * chain, restore stack, emit warning, and try to
+ * proceed nevertheless
+ */
+ pr_warning("not enough memory for decl stack:%d", err);
+ d->decl_stack_cnt = stack_start;
+ return;
+ }
+
+ /* VOID */
+ if (id == 0)
+ break;
+
+ t = btf__type_by_id(d->btf, id);
+ switch (btf_kind(t)) {
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_FUNC_PROTO:
+ id = t->type;
+ break;
+ case BTF_KIND_ARRAY:
+ id = btf_array(t)->type;
+ break;
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_TYPEDEF:
+ goto done;
+ default:
+ pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
+ btf_kind(t), id);
+ goto done;
+ }
+ }
+done:
+ /*
+ * We might be inside a chain of declarations (e.g., array of function
+ * pointers returning anonymous (so inlined) structs, having another
+ * array field). Each of those needs its own "stack frame" to handle
+ * emitting of declarations. Those stack frames are non-overlapping
+ * portions of shared btf_dump->decl_stack. To make it a bit nicer to
+ * handle this set of nested stacks, we create a view corresponding to
+ * our own "stack frame" and work with it as an independent stack.
+ * We'll need to clean up after emit_type_chain() returns, though.
+ */
+ decl_stack.ids = d->decl_stack + stack_start;
+ decl_stack.cnt = d->decl_stack_cnt - stack_start;
+ btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
+ /*
+ * emit_type_chain() guarantees that it will pop its entire decl_stack
+ * frame before returning. But it works with a read-only view into
+ * decl_stack, so it doesn't actually pop anything from the
+ * perspective of shared btf_dump->decl_stack, per se. We need to
+ * reset decl_stack state to how it was before us to avoid it growing
+ * all the time.
+ */
+ d->decl_stack_cnt = stack_start;
+}
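+
+/*
+ * Worked example: for a field declared as
+ *	const int *a[5];
+ * the BTF chain is [array] -> [ptr] -> [const] -> [int], so the local
+ * decl stack (bottom to top) is array, ptr, const, int, and
+ * btf_dump_emit_type_chain() pops it to print "const int *" and then
+ * "a[5]".
+ */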
+
+static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
+{
+ const struct btf_type *t;
+ __u32 id;
+
+ while (decl_stack->cnt) {
+ id = decl_stack->ids[decl_stack->cnt - 1];
+ t = btf__type_by_id(d->btf, id);
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_VOLATILE:
+ btf_dump_printf(d, "volatile ");
+ break;
+ case BTF_KIND_CONST:
+ btf_dump_printf(d, "const ");
+ break;
+ case BTF_KIND_RESTRICT:
+ btf_dump_printf(d, "restrict ");
+ break;
+ default:
+ return;
+ }
+ decl_stack->cnt--;
+ }
+}
+
+static void btf_dump_emit_name(const struct btf_dump *d,
+ const char *name, bool last_was_ptr)
+{
+ bool separate = name[0] && !last_was_ptr;
+
+ btf_dump_printf(d, "%s%s", separate ? " " : "", name);
+}
+
+static void btf_dump_emit_type_chain(struct btf_dump *d,
+ struct id_stack *decls,
+ const char *fname, int lvl)
+{
+ /*
+ * last_was_ptr is used to determine if we need to separate pointer
+ * asterisk (*) from previous part of type signature with space, so
+ * that we get `int ***`, instead of `int * * *`. We default to true
+ * for cases where we have a single pointer in a chain. E.g., in the ptr ->
+ * func_proto case. func_proto will start a new emit_type_chain call
+ * with just ptr, which should be emitted as (*) or (*<fname>), so we
+ * don't want to prepend space for that last pointer.
+ */
+ bool last_was_ptr = true;
+ const struct btf_type *t;
+ const char *name;
+ __u16 kind;
+ __u32 id;
+
+ while (decls->cnt) {
+ id = decls->ids[--decls->cnt];
+ if (id == 0) {
+ /* VOID is a special snowflake */
+ btf_dump_emit_mods(d, decls);
+ btf_dump_printf(d, "void");
+ last_was_ptr = false;
+ continue;
+ }
+
+ t = btf__type_by_id(d->btf, id);
+ kind = btf_kind(t);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ btf_dump_emit_mods(d, decls);
+ name = btf_name_of(d, t->name_off);
+ btf_dump_printf(d, "%s", name);
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ btf_dump_emit_mods(d, decls);
+ /* inline anonymous struct/union */
+ if (t->name_off == 0)
+ btf_dump_emit_struct_def(d, id, t, lvl);
+ else
+ btf_dump_emit_struct_fwd(d, id, t);
+ break;
+ case BTF_KIND_ENUM:
+ btf_dump_emit_mods(d, decls);
+ /* inline anonymous enum */
+ if (t->name_off == 0)
+ btf_dump_emit_enum_def(d, id, t, lvl);
+ else
+ btf_dump_emit_enum_fwd(d, id, t);
+ break;
+ case BTF_KIND_FWD:
+ btf_dump_emit_mods(d, decls);
+ btf_dump_emit_fwd_def(d, id, t);
+ break;
+ case BTF_KIND_TYPEDEF:
+ btf_dump_emit_mods(d, decls);
+ btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
+ break;
+ case BTF_KIND_PTR:
+ btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
+ break;
+ case BTF_KIND_VOLATILE:
+ btf_dump_printf(d, " volatile");
+ break;
+ case BTF_KIND_CONST:
+ btf_dump_printf(d, " const");
+ break;
+ case BTF_KIND_RESTRICT:
+ btf_dump_printf(d, " restrict");
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = btf_array(t);
+ const struct btf_type *next_t;
+ __u32 next_id;
+ bool multidim;
+ /*
+ * GCC has a bug
+ * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
+ * which causes it to emit extra const/volatile
+ * modifiers for an array, if the array's element type
+ * has const/volatile modifiers. Clang doesn't do that.
+ * In general, it doesn't seem very meaningful to have
+ * a const/volatile modifier for an array, so we are
+ * going to silently skip them here.
+ */
+ while (decls->cnt) {
+ next_id = decls->ids[decls->cnt - 1];
+ next_t = btf__type_by_id(d->btf, next_id);
+ if (btf_is_mod(next_t))
+ decls->cnt--;
+ else
+ break;
+ }
+
+ if (decls->cnt == 0) {
+ btf_dump_emit_name(d, fname, last_was_ptr);
+ btf_dump_printf(d, "[%u]", a->nelems);
+ return;
+ }
+
+ next_t = btf__type_by_id(d->btf, next_id);
+ multidim = btf_is_array(next_t);
+ /* we need space if we have named non-pointer */
+ if (fname[0] && !last_was_ptr)
+ btf_dump_printf(d, " ");
+ /* no parentheses for multi-dimensional array */
+ if (!multidim)
+ btf_dump_printf(d, "(");
+ btf_dump_emit_type_chain(d, decls, fname, lvl);
+ if (!multidim)
+ btf_dump_printf(d, ")");
+ btf_dump_printf(d, "[%u]", a->nelems);
+ return;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = btf_params(t);
+ __u16 vlen = btf_vlen(t);
+ int i;
+
+ btf_dump_emit_mods(d, decls);
+ if (decls->cnt) {
+ btf_dump_printf(d, " (");
+ btf_dump_emit_type_chain(d, decls, fname, lvl);
+ btf_dump_printf(d, ")");
+ } else {
+ btf_dump_emit_name(d, fname, last_was_ptr);
+ }
+ btf_dump_printf(d, "(");
+ /*
+ * Clang for BPF target generates func_proto with no
+ * args as a func_proto with a single void arg (e.g.,
+ * `int (*f)(void)` vs just `int (*f)()`). We are
+ * going to pretend there are no args for such a case.
+ */
+ if (vlen == 1 && p->type == 0) {
+ btf_dump_printf(d, ")");
+ return;
+ }
+
+ for (i = 0; i < vlen; i++, p++) {
+ if (i > 0)
+ btf_dump_printf(d, ", ");
+
+ /* last arg of type void is vararg */
+ if (i == vlen - 1 && p->type == 0) {
+ btf_dump_printf(d, "...");
+ break;
+ }
+
+ name = btf_name_of(d, p->name_off);
+ btf_dump_emit_type_decl(d, p->type, name, lvl);
+ }
+
+ btf_dump_printf(d, ")");
+ return;
+ }
+ default:
+ pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
+ kind, id);
+ return;
+ }
+
+ last_was_ptr = kind == BTF_KIND_PTR;
+ }
+
+ btf_dump_emit_name(d, fname, last_was_ptr);
+}
+
+/* return number of duplicates (occurrences) of a given name */
+static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+ const char *orig_name)
+{
+ size_t dup_cnt = 0;
+
+ hashmap__find(name_map, orig_name, (void **)&dup_cnt);
+ dup_cnt++;
+ hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
+
+ return dup_cnt;
+}
+
+static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
+ struct hashmap *name_map)
+{
+ struct btf_dump_type_aux_state *s = &d->type_states[id];
+ const struct btf_type *t = btf__type_by_id(d->btf, id);
+ const char *orig_name = btf_name_of(d, t->name_off);
+ const char **cached_name = &d->cached_names[id];
+ size_t dup_cnt;
+
+ if (t->name_off == 0)
+ return "";
+
+ if (s->name_resolved)
+ return *cached_name ? *cached_name : orig_name;
+
+ dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
+ if (dup_cnt > 1) {
+ const size_t max_len = 256;
+ char new_name[max_len];
+
+ snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt);
+ *cached_name = strdup(new_name);
+ }
+
+ s->name_resolved = 1;
+ return *cached_name ? *cached_name : orig_name;
+}
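+
+/*
+ * E.g., if BTF contains two distinct types that are both named
+ * "callback_head" (name chosen for illustration), the first occurrence
+ * keeps its name and the second is emitted as "callback_head___2"; the
+ * suffixed copy is strdup()'ed into cached_names and freed in
+ * btf_dump__free().
+ */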
+
+static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
+{
+ return btf_dump_resolve_name(d, id, d->type_names);
+}
+
+static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
+{
+ return btf_dump_resolve_name(d, id, d->ident_names);
+}
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
new file mode 100644
index 000000000000..6122272943e6
--- /dev/null
+++ b/tools/lib/bpf/hashmap.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * Generic non-thread-safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "hashmap.h"
+
+/* start with 4 buckets */
+#define HASHMAP_MIN_CAP_BITS 2
+
+static void hashmap_add_entry(struct hashmap_entry **pprev,
+ struct hashmap_entry *entry)
+{
+ entry->next = *pprev;
+ *pprev = entry;
+}
+
+static void hashmap_del_entry(struct hashmap_entry **pprev,
+ struct hashmap_entry *entry)
+{
+ *pprev = entry->next;
+ entry->next = NULL;
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn, void *ctx)
+{
+ map->hash_fn = hash_fn;
+ map->equal_fn = equal_fn;
+ map->ctx = ctx;
+
+ map->buckets = NULL;
+ map->cap = 0;
+ map->cap_bits = 0;
+ map->sz = 0;
+}
+
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn,
+ void *ctx)
+{
+ struct hashmap *map = malloc(sizeof(struct hashmap));
+
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+ hashmap__init(map, hash_fn, equal_fn, ctx);
+ return map;
+}
+
+void hashmap__clear(struct hashmap *map)
+{
+ free(map->buckets);
+ map->cap = map->cap_bits = map->sz = 0;
+}
+
+void hashmap__free(struct hashmap *map)
+{
+ if (!map)
+ return;
+
+ hashmap__clear(map);
+ free(map);
+}
+
+size_t hashmap__size(const struct hashmap *map)
+{
+ return map->sz;
+}
+
+size_t hashmap__capacity(const struct hashmap *map)
+{
+ return map->cap;
+}
+
+static bool hashmap_needs_to_grow(struct hashmap *map)
+{
+ /* grow if empty or more than 75% filled */
+ return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
+}
+
+static int hashmap_grow(struct hashmap *map)
+{
+ struct hashmap_entry **new_buckets;
+ struct hashmap_entry *cur, *tmp;
+ size_t new_cap_bits, new_cap;
+ size_t h;
+ int bkt;
+
+ new_cap_bits = map->cap_bits + 1;
+ if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
+ new_cap_bits = HASHMAP_MIN_CAP_BITS;
+
+ new_cap = 1UL << new_cap_bits;
+ new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
+ if (!new_buckets)
+ return -ENOMEM;
+
+ hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+ h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
+ hashmap_add_entry(&new_buckets[h], cur);
+ }
+
+ map->cap = new_cap;
+ map->cap_bits = new_cap_bits;
+ free(map->buckets);
+ map->buckets = new_buckets;
+
+ return 0;
+}
+
+static bool hashmap_find_entry(const struct hashmap *map,
+ const void *key, size_t hash,
+ struct hashmap_entry ***pprev,
+ struct hashmap_entry **entry)
+{
+ struct hashmap_entry *cur, **prev_ptr;
+
+ if (!map->buckets)
+ return false;
+
+ for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
+ cur;
+ prev_ptr = &cur->next, cur = cur->next) {
+ if (map->equal_fn(cur->key, key, map->ctx)) {
+ if (pprev)
+ *pprev = prev_ptr;
+ *entry = cur;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+ enum hashmap_insert_strategy strategy,
+ const void **old_key, void **old_value)
+{
+ struct hashmap_entry *entry;
+ size_t h;
+ int err;
+
+ if (old_key)
+ *old_key = NULL;
+ if (old_value)
+ *old_value = NULL;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (strategy != HASHMAP_APPEND &&
+ hashmap_find_entry(map, key, h, NULL, &entry)) {
+ if (old_key)
+ *old_key = entry->key;
+ if (old_value)
+ *old_value = entry->value;
+
+ if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
+ entry->key = key;
+ entry->value = value;
+ return 0;
+ } else if (strategy == HASHMAP_ADD) {
+ return -EEXIST;
+ }
+ }
+
+ if (strategy == HASHMAP_UPDATE)
+ return -ENOENT;
+
+ if (hashmap_needs_to_grow(map)) {
+ err = hashmap_grow(map);
+ if (err)
+ return err;
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ }
+
+ entry = malloc(sizeof(struct hashmap_entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key = key;
+ entry->value = value;
+ hashmap_add_entry(&map->buckets[h], entry);
+ map->sz++;
+
+ return 0;
+}
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+{
+ struct hashmap_entry *entry;
+ size_t h;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (!hashmap_find_entry(map, key, h, NULL, &entry))
+ return false;
+
+ if (value)
+ *value = entry->value;
+ return true;
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+ const void **old_key, void **old_value)
+{
+ struct hashmap_entry **pprev, *entry;
+ size_t h;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (!hashmap_find_entry(map, key, h, &pprev, &entry))
+ return false;
+
+ if (old_key)
+ *old_key = entry->key;
+ if (old_value)
+ *old_value = entry->value;
+
+ hashmap_del_entry(pprev, entry);
+ free(entry);
+ map->sz--;
+
+ return true;
+}
+
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
new file mode 100644
index 000000000000..bae8879cdf58
--- /dev/null
+++ b/tools/lib/bpf/hashmap.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Generic non-thread-safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#ifndef __LIBBPF_HASHMAP_H
+#define __LIBBPF_HASHMAP_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#ifdef __GLIBC__
+#include <bits/wordsize.h>
+#else
+#include <bits/reg.h>
+#endif
+#include "libbpf_internal.h"
+
+static inline size_t hash_bits(size_t h, int bits)
+{
+ /* shuffle bits and return requested number of upper bits */
+ return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+}
+
+typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
+typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
+
+struct hashmap_entry {
+ const void *key;
+ void *value;
+ struct hashmap_entry *next;
+};
+
+struct hashmap {
+ hashmap_hash_fn hash_fn;
+ hashmap_equal_fn equal_fn;
+ void *ctx;
+
+ struct hashmap_entry **buckets;
+ size_t cap;
+ size_t cap_bits;
+ size_t sz;
+};
+
+#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \
+ .hash_fn = (hash_fn), \
+ .equal_fn = (equal_fn), \
+ .ctx = (ctx), \
+ .buckets = NULL, \
+ .cap = 0, \
+ .cap_bits = 0, \
+ .sz = 0, \
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn, void *ctx);
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn,
+ void *ctx);
+void hashmap__clear(struct hashmap *map);
+void hashmap__free(struct hashmap *map);
+
+size_t hashmap__size(const struct hashmap *map);
+size_t hashmap__capacity(const struct hashmap *map);
+
+/*
+ * Hashmap insertion strategy:
+ * - HASHMAP_ADD - only add key/value if key doesn't exist yet;
+ * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise,
+ * update value;
+ * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do
+ * nothing and return -ENOENT;
+ * - HASHMAP_APPEND - always add key/value pair, even if key already exists.
+ * This turns hashmap into a multimap by allowing multiple values to be
+ * associated with the same key. Most useful read API for such hashmap is
+ * hashmap__for_each_key_entry() iteration. If hashmap__find() is still
+ * used, it will return last inserted key/value entry (first in a bucket
+ * chain).
+ */
+enum hashmap_insert_strategy {
+ HASHMAP_ADD,
+ HASHMAP_SET,
+ HASHMAP_UPDATE,
+ HASHMAP_APPEND,
+};
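+
+/*
+ * Usage sketch (illustrative; hash_fn/equal_fn are caller-provided
+ * callbacks, error handling elided):
+ *
+ *	struct hashmap *map = hashmap__new(hash_fn, equal_fn, NULL);
+ *	long val;
+ *
+ *	hashmap__add(map, "key", (void *)42);
+ *	if (hashmap__find(map, "key", (void **)&val))
+ *		printf("found: %ld\n", val);
+ *	hashmap__delete(map, "key", NULL, NULL);
+ *	hashmap__free(map);
+ */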
+
+/*
+ * hashmap__insert() adds a key/value entry with various semantics, depending
+ * on the provided strategy value. If a given key/value pair replaced an
+ * already existing key/value pair, both the old key and the old value will
+ * be returned through old_key and old_value to allow the calling code to do
+ * proper memory management.
+ */
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+ enum hashmap_insert_strategy strategy,
+ const void **old_key, void **old_value);
+
+static inline int hashmap__add(struct hashmap *map,
+ const void *key, void *value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
+}
+
+static inline int hashmap__set(struct hashmap *map,
+ const void *key, void *value,
+ const void **old_key, void **old_value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_SET,
+ old_key, old_value);
+}
+
+static inline int hashmap__update(struct hashmap *map,
+ const void *key, void *value,
+ const void **old_key, void **old_value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_UPDATE,
+ old_key, old_value);
+}
+
+static inline int hashmap__append(struct hashmap *map,
+ const void *key, void *value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+ const void **old_key, void **old_value);
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+
+/*
+ * hashmap__for_each_entry - iterate over all entries in hashmap
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @bkt: integer used as a bucket loop cursor
+ */
+#define hashmap__for_each_entry(map, cur, bkt) \
+ for (bkt = 0; bkt < map->cap; bkt++) \
+ for (cur = map->buckets[bkt]; cur; cur = cur->next)
+
+/*
+ * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
+ * against removals
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @tmp: struct hashmap_entry * used as a temporary next cursor storage
+ * @bkt: integer used as a bucket loop cursor
+ */
+#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
+ for (bkt = 0; bkt < map->cap; bkt++) \
+ for (cur = map->buckets[bkt]; \
+ cur && ({tmp = cur->next; true; }); \
+ cur = tmp)
+
+/*
+ * hashmap__for_each_key_entry - iterate over entries associated with given key
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @key: key to iterate entries for
+ */
+#define hashmap__for_each_key_entry(map, cur, _key) \
+ for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+ map->cap_bits); \
+ map->buckets ? map->buckets[bkt] : NULL; }); \
+ cur; \
+ cur = cur->next) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
+ for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+ map->cap_bits); \
+ cur = map->buckets ? map->buckets[bkt] : NULL; }); \
+ cur && ({ tmp = cur->next; true; }); \
+ cur = tmp) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+#endif /* __LIBBPF_HASHMAP_H */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7e3b79d7c25f..e0276520171b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -20,6 +20,7 @@
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
+#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
@@ -32,9 +33,13 @@
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
+#include <sys/epoll.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
+#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
@@ -43,8 +48,8 @@
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
-#include "libbpf_util.h"
#include "libbpf_internal.h"
+#include "hashmap.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -72,9 +77,12 @@ static int __base_pr(enum libbpf_print_level level, const char *format,
static libbpf_print_fn_t __libbpf_pr = __base_pr;
-void libbpf_set_print(libbpf_print_fn_t fn)
+libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
+ libbpf_print_fn_t old_print_fn = __libbpf_pr;
+
__libbpf_pr = fn;
+ return old_print_fn;
}
__printf(2, 3)
@@ -179,7 +187,6 @@ struct bpf_program {
bpf_program_clear_priv_t clear_priv;
enum bpf_attach_type expected_attach_type;
- int btf_fd;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
@@ -189,6 +196,7 @@ struct bpf_program {
void *line_info;
__u32 line_info_rec_size;
__u32 line_info_cnt;
+ __u32 prog_flags;
};
enum libbpf_map_type {
@@ -207,7 +215,8 @@ static const char * const libbpf_type_to_btf_name[] = {
struct bpf_map {
int fd;
char *name;
- size_t offset;
+ int sec_idx;
+ size_t sec_offset;
int map_ifindex;
int inner_map_fd;
struct bpf_map_def def;
@@ -234,6 +243,7 @@ struct bpf_object {
size_t nr_programs;
struct bpf_map *maps;
size_t nr_maps;
+ size_t maps_cap;
struct bpf_secdata sections;
bool loaded;
@@ -260,6 +270,7 @@ struct bpf_object {
} *reloc;
int nr_reloc;
int maps_shndx;
+ int btf_maps_shndx;
int text_shndx;
int data_shndx;
int rodata_shndx;
@@ -306,7 +317,6 @@ void bpf_program__unload(struct bpf_program *prog)
prog->instances.nr = -1;
zfree(&prog->instances.fds);
- zclose(prog->btf_fd);
zfree(&prog->func_info);
zfree(&prog->line_info);
}
@@ -349,8 +359,11 @@ static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
struct bpf_program *prog)
{
- if (size < sizeof(struct bpf_insn)) {
- pr_warning("corrupted section '%s'\n", section_name);
+ const size_t bpf_insn_sz = sizeof(struct bpf_insn);
+
+ if (size == 0 || size % bpf_insn_sz) {
+ pr_warning("corrupted section '%s', size: %zu\n",
+ section_name, size);
return -EINVAL;
}
@@ -376,14 +389,12 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
section_name);
goto errout;
}
- prog->insns_cnt = size / sizeof(struct bpf_insn);
- memcpy(prog->insns, data,
- prog->insns_cnt * sizeof(struct bpf_insn));
+ prog->insns_cnt = size / bpf_insn_sz;
+ memcpy(prog->insns, data, size);
prog->idx = idx;
prog->instances.fds = NULL;
prog->instances.nr = -1;
prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->btf_fd = -1;
return 0;
errout:
@@ -495,15 +506,14 @@ static struct bpf_object *bpf_object__new(const char *path,
strcpy(obj->path, path);
/* Using basename() GNU version which doesn't modify arg. */
- strncpy(obj->name, basename((void *)path),
- sizeof(obj->name) - 1);
+ strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
end = strchr(obj->name, '.');
if (end)
*end = 0;
obj->efile.fd = -1;
/*
- * Caller of this function should also calls
+ * Caller of this function should also call
* bpf_object__elf_finish() after data collection to return
* obj_buf to user. If not, we should duplicate the buffer to
* avoid user freeing them before elf finish.
@@ -511,6 +521,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.maps_shndx = -1;
+ obj->efile.btf_maps_shndx = -1;
obj->efile.data_shndx = -1;
obj->efile.rodata_shndx = -1;
obj->efile.bss_shndx = -1;
@@ -563,38 +574,35 @@ static int bpf_object__elf_init(struct bpf_object *obj)
} else {
obj->efile.fd = open(obj->path, O_RDONLY);
if (obj->efile.fd < 0) {
- char errmsg[STRERR_BUFSIZE];
- char *cp = libbpf_strerror_r(errno, errmsg,
- sizeof(errmsg));
+ char errmsg[STRERR_BUFSIZE], *cp;
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warning("failed to open %s: %s\n", obj->path, cp);
- return -errno;
+ return err;
}
obj->efile.elf = elf_begin(obj->efile.fd,
- LIBBPF_ELF_C_READ_MMAP,
- NULL);
+ LIBBPF_ELF_C_READ_MMAP, NULL);
}
if (!obj->efile.elf) {
- pr_warning("failed to open %s as ELF file\n",
- obj->path);
+ pr_warning("failed to open %s as ELF file\n", obj->path);
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
- pr_warning("failed to get EHDR from %s\n",
- obj->path);
+ pr_warning("failed to get EHDR from %s\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
ep = &obj->efile.ehdr;
/* Old LLVM set e_machine to EM_NONE */
- if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
- pr_warning("%s is not an eBPF object file\n",
- obj->path);
+ if (ep->e_type != ET_REL ||
+ (ep->e_machine && ep->e_machine != EM_BPF)) {
+ pr_warning("%s is not an eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@@ -605,47 +613,31 @@ errout:
return err;
}
-static int
-bpf_object__check_endianness(struct bpf_object *obj)
+static int bpf_object__check_endianness(struct bpf_object *obj)
{
- static unsigned int const endian = 1;
-
- switch (obj->efile.ehdr.e_ident[EI_DATA]) {
- case ELFDATA2LSB:
- /* We are big endian, BPF obj is little endian. */
- if (*(unsigned char const *)&endian != 1)
- goto mismatch;
- break;
-
- case ELFDATA2MSB:
- /* We are little endian, BPF obj is big endian. */
- if (*(unsigned char const *)&endian != 0)
- goto mismatch;
- break;
- default:
- return -LIBBPF_ERRNO__ENDIAN;
- }
-
- return 0;
-
-mismatch:
- pr_warning("Error: endianness mismatch.\n");
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return 0;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
+ return 0;
+#else
+# error "Unrecognized __BYTE_ORDER"
+#endif
+ pr_warning("endianness mismatch.\n");
return -LIBBPF_ERRNO__ENDIAN;
}
static int
-bpf_object__init_license(struct bpf_object *obj,
- void *data, size_t size)
+bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
- memcpy(obj->license, data,
- min(size, sizeof(obj->license) - 1));
+ memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
pr_debug("license of %s is %s\n", obj->path, obj->license);
return 0;
}
static int
-bpf_object__init_kversion(struct bpf_object *obj,
- void *data, size_t size)
+bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
__u32 kver;
@@ -655,8 +647,7 @@ bpf_object__init_kversion(struct bpf_object *obj,
}
memcpy(&kver, data, sizeof(kver));
obj->kern_version = kver;
- pr_debug("kernel version of %s is %x\n", obj->path,
- obj->kern_version);
+ pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
return 0;
}
@@ -665,7 +656,9 @@ static int compare_bpf_map(const void *_a, const void *_b)
const struct bpf_map *a = _a;
const struct bpf_map *b = _b;
- return a->offset - b->offset;
+ if (a->sec_idx != b->sec_idx)
+ return a->sec_idx - b->sec_idx;
+ return a->sec_offset - b->sec_offset;
}
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
@@ -782,24 +775,55 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
return -ENOENT;
}
-static bool bpf_object__has_maps(const struct bpf_object *obj)
+static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
- return obj->efile.maps_shndx >= 0 ||
- obj->efile.data_shndx >= 0 ||
- obj->efile.rodata_shndx >= 0 ||
- obj->efile.bss_shndx >= 0;
+ struct bpf_map *new_maps;
+ size_t new_cap;
+ int i;
+
+ if (obj->nr_maps < obj->maps_cap)
+ return &obj->maps[obj->nr_maps++];
+
+ new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
+ new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
+ if (!new_maps) {
+ pr_warning("alloc maps for object failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ obj->maps_cap = new_cap;
+ obj->maps = new_maps;
+
+ /* zero out new maps */
+ memset(obj->maps + obj->nr_maps, 0,
+ (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
+ /*
+ * fill all fds with -1 so we won't close an incorrect fd (fd=0 is
+ * stdin) on failure (zclose won't close a negative fd).
+ */
+ for (i = obj->nr_maps; i < obj->maps_cap; i++) {
+ obj->maps[i].fd = -1;
+ obj->maps[i].inner_map_fd = -1;
+ }
+
+ return &obj->maps[obj->nr_maps++];
}
static int
-bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
- enum libbpf_map_type type, Elf_Data *data,
- void **data_buff)
+bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
+ int sec_idx, Elf_Data *data, void **data_buff)
{
- struct bpf_map_def *def = &map->def;
char map_name[BPF_OBJ_NAME_LEN];
+ struct bpf_map_def *def;
+ struct bpf_map *map;
+
+ map = bpf_object__add_map(obj);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
map->libbpf_type = type;
- map->offset = ~(typeof(map->offset))0;
+ map->sec_idx = sec_idx;
+ map->sec_offset = 0;
snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
libbpf_type_to_btf_name[type]);
map->name = strdup(map_name);
@@ -807,13 +831,15 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
pr_warning("failed to alloc map name\n");
return -ENOMEM;
}
+ pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
+ map_name, map->sec_idx, map->sec_offset);
+ def = &map->def;
def->type = BPF_MAP_TYPE_ARRAY;
def->key_size = sizeof(int);
def->value_size = data->d_size;
def->max_entries = 1;
- def->map_flags = type == LIBBPF_MAP_RODATA ?
- BPF_F_RDONLY_PROG : 0;
+ def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
if (data_buff) {
*data_buff = malloc(data->d_size);
if (!*data_buff) {
@@ -828,30 +854,61 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
return 0;
}
-static int
-bpf_object__init_maps(struct bpf_object *obj, int flags)
+static int bpf_object__init_global_data_maps(struct bpf_object *obj)
+{
+ int err;
+
+ if (!obj->caps.global_data)
+ return 0;
+ /*
+ * Populate obj->maps with libbpf internal maps.
+ */
+ if (obj->efile.data_shndx >= 0) {
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
+ obj->efile.data_shndx,
+ obj->efile.data,
+ &obj->sections.data);
+ if (err)
+ return err;
+ }
+ if (obj->efile.rodata_shndx >= 0) {
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
+ obj->efile.rodata_shndx,
+ obj->efile.rodata,
+ &obj->sections.rodata);
+ if (err)
+ return err;
+ }
+ if (obj->efile.bss_shndx >= 0) {
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
+ obj->efile.bss_shndx,
+ obj->efile.bss, NULL);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
- int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
- bool strict = !(flags & MAPS_RELAX_COMPAT);
Elf_Data *symbols = obj->efile.symbols;
+ int i, map_def_sz = 0, nr_maps = 0, nr_syms;
Elf_Data *data = NULL;
- int ret = 0;
+ Elf_Scn *scn;
+
+ if (obj->efile.maps_shndx < 0)
+ return 0;
if (!symbols)
return -EINVAL;
- nr_syms = symbols->d_size / sizeof(GElf_Sym);
-
- if (obj->efile.maps_shndx >= 0) {
- Elf_Scn *scn = elf_getscn(obj->efile.elf,
- obj->efile.maps_shndx);
- if (scn)
- data = elf_getdata(scn, NULL);
- if (!scn || !data) {
- pr_warning("failed to get Elf_Data from map section %d\n",
- obj->efile.maps_shndx);
- return -EINVAL;
- }
+ scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
+ if (scn)
+ data = elf_getdata(scn, NULL);
+ if (!scn || !data) {
+ pr_warning("failed to get Elf_Data from map section %d\n",
+ obj->efile.maps_shndx);
+ return -EINVAL;
}
/*
@@ -861,16 +918,8 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
*
* TODO: Detect array of map and report error.
*/
- if (obj->caps.global_data) {
- if (obj->efile.data_shndx >= 0)
- nr_maps_glob++;
- if (obj->efile.rodata_shndx >= 0)
- nr_maps_glob++;
- if (obj->efile.bss_shndx >= 0)
- nr_maps_glob++;
- }
-
- for (i = 0; data && i < nr_syms; i++) {
+ nr_syms = symbols->d_size / sizeof(GElf_Sym);
+ for (i = 0; i < nr_syms; i++) {
GElf_Sym sym;
if (!gelf_getsym(symbols, i, &sym))
@@ -879,74 +928,59 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
continue;
nr_maps++;
}
-
- if (!nr_maps && !nr_maps_glob)
- return 0;
-
/* Assume equally sized map definitions */
- if (data) {
- pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
- nr_maps, data->d_size);
-
- map_def_sz = data->d_size / nr_maps;
- if (!data->d_size || (data->d_size % nr_maps) != 0) {
- pr_warning("unable to determine map definition size "
- "section %s, %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
- return -EINVAL;
- }
- }
-
- nr_maps += nr_maps_glob;
- obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
- if (!obj->maps) {
- pr_warning("alloc maps for object failed\n");
- return -ENOMEM;
- }
- obj->nr_maps = nr_maps;
-
- for (i = 0; i < nr_maps; i++) {
- /*
- * fill all fd with -1 so won't close incorrect
- * fd (fd=0 is stdin) when failure (zclose won't close
- * negative fd)).
- */
- obj->maps[i].fd = -1;
- obj->maps[i].inner_map_fd = -1;
+ pr_debug("maps in %s: %d maps in %zd bytes\n",
+ obj->path, nr_maps, data->d_size);
+
+ map_def_sz = data->d_size / nr_maps;
+ if (!data->d_size || (data->d_size % nr_maps) != 0) {
+		pr_warning("unable to determine map definition size for "
+			   "section %s: %d maps in %zd bytes\n",
+ obj->path, nr_maps, data->d_size);
+ return -EINVAL;
}
- /*
- * Fill obj->maps using data in "maps" section.
- */
- for (i = 0, map_idx = 0; data && i < nr_syms; i++) {
+ /* Fill obj->maps using data in "maps" section. */
+ for (i = 0; i < nr_syms; i++) {
GElf_Sym sym;
const char *map_name;
struct bpf_map_def *def;
+ struct bpf_map *map;
if (!gelf_getsym(symbols, i, &sym))
continue;
if (sym.st_shndx != obj->efile.maps_shndx)
continue;
- map_name = elf_strptr(obj->efile.elf,
- obj->efile.strtabidx,
+ map = bpf_object__add_map(obj);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
sym.st_name);
+ if (!map_name) {
+ pr_warning("failed to get map #%d name sym string for obj %s\n",
+ i, obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
- obj->maps[map_idx].offset = sym.st_value;
+ map->libbpf_type = LIBBPF_MAP_UNSPEC;
+ map->sec_idx = sym.st_shndx;
+ map->sec_offset = sym.st_value;
+ pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
+ map_name, map->sec_idx, map->sec_offset);
if (sym.st_value + map_def_sz > data->d_size) {
pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
obj->path, map_name);
return -EINVAL;
}
- obj->maps[map_idx].name = strdup(map_name);
- if (!obj->maps[map_idx].name) {
+ map->name = strdup(map_name);
+ if (!map->name) {
pr_warning("failed to alloc map name\n");
return -ENOMEM;
}
- pr_debug("map %d is \"%s\"\n", map_idx,
- obj->maps[map_idx].name);
+ pr_debug("map %d is \"%s\"\n", i, map->name);
def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
/*
* If the definition of the map in the object file fits in
@@ -955,7 +989,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
* calloc above.
*/
if (map_def_sz <= sizeof(struct bpf_map_def)) {
- memcpy(&obj->maps[map_idx].def, def, map_def_sz);
+ memcpy(&map->def, def, map_def_sz);
} else {
/*
* Here the map structure being read is bigger than what
@@ -975,37 +1009,336 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
return -EINVAL;
}
}
- memcpy(&obj->maps[map_idx].def, def,
- sizeof(struct bpf_map_def));
+ memcpy(&map->def, def, sizeof(struct bpf_map_def));
}
- map_idx++;
}
+ return 0;
+}
- if (!obj->caps.global_data)
- goto finalize;
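+/*
+ * Follow modifier (const/volatile/restrict) and typedef chains down to the
+ * underlying type: e.g., both "const struct foo" and "typedef struct foo
+ * foo_t" resolve to "struct foo". If res_id is non-NULL, it is set to the
+ * ID of the resolved type.
+ */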
+static const struct btf_type *
+skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
+{
+ const struct btf_type *t = btf__type_by_id(btf, id);
- /*
- * Populate rest of obj->maps with libbpf internal maps.
- */
- if (obj->efile.data_shndx >= 0)
- ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
- LIBBPF_MAP_DATA,
- obj->efile.data,
- &obj->sections.data);
- if (!ret && obj->efile.rodata_shndx >= 0)
- ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
- LIBBPF_MAP_RODATA,
- obj->efile.rodata,
- &obj->sections.rodata);
- if (!ret && obj->efile.bss_shndx >= 0)
- ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
- LIBBPF_MAP_BSS,
- obj->efile.bss, NULL);
-finalize:
- if (!ret)
+ if (res_id)
+ *res_id = id;
+
+ while (btf_is_mod(t) || btf_is_typedef(t)) {
+ if (res_id)
+ *res_id = t->type;
+ t = btf__type_by_id(btf, t->type);
+ }
+
+ return t;
+}
+
+/*
+ * Fetch integer attribute of BTF map definition. Such attributes are
+ * represented using a pointer to an array, in which dimensionality of array
+ * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
+ * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
+ * type definition, while using only sizeof(void *) space in ELF data section.
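+ *
+ * An illustrative BPF-side declaration using this encoding (a sketch, not
+ * taken from this patch; the map name and sizes are made up):
+ *
+ *	struct {
+ *		int (*type)[BPF_MAP_TYPE_ARRAY];
+ *		int (*max_entries)[64];
+ *		__u32 *key;
+ *		struct my_value *value;
+ *	} my_map SEC(".maps");
+ *
+ * Integer attributes come from the array dimensions, while key/value sizes
+ * are taken from the pointed-to types.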
+ */
+static bool get_map_field_int(const char *map_name, const struct btf *btf,
+			      const struct btf_type *def,
+			      const struct btf_member *m, __u32 *res)
+{
+ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
+ const char *name = btf__name_by_offset(btf, m->name_off);
+ const struct btf_array *arr_info;
+ const struct btf_type *arr_t;
+
+ if (!btf_is_ptr(t)) {
+ pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
+ map_name, name, btf_kind(t));
+ return false;
+ }
+
+ arr_t = btf__type_by_id(btf, t->type);
+ if (!arr_t) {
+ pr_warning("map '%s': attr '%s': type [%u] not found.\n",
+ map_name, name, t->type);
+ return false;
+ }
+ if (!btf_is_array(arr_t)) {
+ pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
+ map_name, name, btf_kind(arr_t));
+ return false;
+ }
+ arr_info = btf_array(arr_t);
+ *res = arr_info->nelems;
+ return true;
+}
+
+static int bpf_object__init_user_btf_map(struct bpf_object *obj,
+ const struct btf_type *sec,
+ int var_idx, int sec_idx,
+ const Elf_Data *data, bool strict)
+{
+ const struct btf_type *var, *def, *t;
+ const struct btf_var_secinfo *vi;
+ const struct btf_var *var_extra;
+ const struct btf_member *m;
+ const char *map_name;
+ struct bpf_map *map;
+ int vlen, i;
+
+ vi = btf_var_secinfos(sec) + var_idx;
+ var = btf__type_by_id(obj->btf, vi->type);
+ var_extra = btf_var(var);
+ map_name = btf__name_by_offset(obj->btf, var->name_off);
+
+ if (map_name == NULL || map_name[0] == '\0') {
+ pr_warning("map #%d: empty name.\n", var_idx);
+ return -EINVAL;
+ }
+ if ((__u64)vi->offset + vi->size > data->d_size) {
+ pr_warning("map '%s' BTF data is corrupted.\n", map_name);
+ return -EINVAL;
+ }
+ if (!btf_is_var(var)) {
+ pr_warning("map '%s': unexpected var kind %u.\n",
+ map_name, btf_kind(var));
+ return -EINVAL;
+ }
+ if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
+ var_extra->linkage != BTF_VAR_STATIC) {
+ pr_warning("map '%s': unsupported var linkage %u.\n",
+ map_name, var_extra->linkage);
+ return -EOPNOTSUPP;
+ }
+
+ def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
+ if (!btf_is_struct(def)) {
+		pr_warning("map '%s': unexpected def kind %u.\n",
+			   map_name, btf_kind(def));
+ return -EINVAL;
+ }
+ if (def->size > vi->size) {
+ pr_warning("map '%s': invalid def size.\n", map_name);
+ return -EINVAL;
+ }
+
+ map = bpf_object__add_map(obj);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ map->name = strdup(map_name);
+ if (!map->name) {
+ pr_warning("map '%s': failed to alloc map name.\n", map_name);
+ return -ENOMEM;
+ }
+ map->libbpf_type = LIBBPF_MAP_UNSPEC;
+ map->def.type = BPF_MAP_TYPE_UNSPEC;
+ map->sec_idx = sec_idx;
+ map->sec_offset = vi->offset;
+ pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
+ map_name, map->sec_idx, map->sec_offset);
+
+ vlen = btf_vlen(def);
+ m = btf_members(def);
+ for (i = 0; i < vlen; i++, m++) {
+ const char *name = btf__name_by_offset(obj->btf, m->name_off);
+
+ if (!name) {
+ pr_warning("map '%s': invalid field #%d.\n",
+ map_name, i);
+ return -EINVAL;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &map->def.type))
+ return -EINVAL;
+ pr_debug("map '%s': found type = %u.\n",
+ map_name, map->def.type);
+ } else if (strcmp(name, "max_entries") == 0) {
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &map->def.max_entries))
+ return -EINVAL;
+ pr_debug("map '%s': found max_entries = %u.\n",
+ map_name, map->def.max_entries);
+ } else if (strcmp(name, "map_flags") == 0) {
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &map->def.map_flags))
+ return -EINVAL;
+ pr_debug("map '%s': found map_flags = %u.\n",
+ map_name, map->def.map_flags);
+ } else if (strcmp(name, "key_size") == 0) {
+ __u32 sz;
+
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &sz))
+ return -EINVAL;
+ pr_debug("map '%s': found key_size = %u.\n",
+ map_name, sz);
+ if (map->def.key_size && map->def.key_size != sz) {
+ pr_warning("map '%s': conflicting key size %u != %u.\n",
+ map_name, map->def.key_size, sz);
+ return -EINVAL;
+ }
+ map->def.key_size = sz;
+ } else if (strcmp(name, "key") == 0) {
+ __s64 sz;
+
+ t = btf__type_by_id(obj->btf, m->type);
+ if (!t) {
+ pr_warning("map '%s': key type [%d] not found.\n",
+ map_name, m->type);
+ return -EINVAL;
+ }
+ if (!btf_is_ptr(t)) {
+ pr_warning("map '%s': key spec is not PTR: %u.\n",
+ map_name, btf_kind(t));
+ return -EINVAL;
+ }
+ sz = btf__resolve_size(obj->btf, t->type);
+ if (sz < 0) {
+ pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
+ map_name, t->type, sz);
+ return sz;
+ }
+ pr_debug("map '%s': found key [%u], sz = %lld.\n",
+ map_name, t->type, sz);
+ if (map->def.key_size && map->def.key_size != sz) {
+ pr_warning("map '%s': conflicting key size %u != %lld.\n",
+ map_name, map->def.key_size, sz);
+ return -EINVAL;
+ }
+ map->def.key_size = sz;
+ map->btf_key_type_id = t->type;
+ } else if (strcmp(name, "value_size") == 0) {
+ __u32 sz;
+
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &sz))
+ return -EINVAL;
+ pr_debug("map '%s': found value_size = %u.\n",
+ map_name, sz);
+ if (map->def.value_size && map->def.value_size != sz) {
+ pr_warning("map '%s': conflicting value size %u != %u.\n",
+ map_name, map->def.value_size, sz);
+ return -EINVAL;
+ }
+ map->def.value_size = sz;
+ } else if (strcmp(name, "value") == 0) {
+ __s64 sz;
+
+ t = btf__type_by_id(obj->btf, m->type);
+ if (!t) {
+ pr_warning("map '%s': value type [%d] not found.\n",
+ map_name, m->type);
+ return -EINVAL;
+ }
+ if (!btf_is_ptr(t)) {
+ pr_warning("map '%s': value spec is not PTR: %u.\n",
+ map_name, btf_kind(t));
+ return -EINVAL;
+ }
+ sz = btf__resolve_size(obj->btf, t->type);
+ if (sz < 0) {
+ pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
+ map_name, t->type, sz);
+ return sz;
+ }
+ pr_debug("map '%s': found value [%u], sz = %lld.\n",
+ map_name, t->type, sz);
+ if (map->def.value_size && map->def.value_size != sz) {
+ pr_warning("map '%s': conflicting value size %u != %lld.\n",
+ map_name, map->def.value_size, sz);
+ return -EINVAL;
+ }
+ map->def.value_size = sz;
+ map->btf_value_type_id = t->type;
+ } else {
+ if (strict) {
+ pr_warning("map '%s': unknown field '%s'.\n",
+ map_name, name);
+ return -ENOTSUP;
+ }
+ pr_debug("map '%s': ignoring unknown field '%s'.\n",
+ map_name, name);
+ }
+ }
+
+ if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
+ pr_warning("map '%s': map type isn't specified.\n", map_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
+{
+ const struct btf_type *sec = NULL;
+ int nr_types, i, vlen, err;
+ const struct btf_type *t;
+ const char *name;
+	Elf_Data *data = NULL;
+ Elf_Scn *scn;
+
+ if (obj->efile.btf_maps_shndx < 0)
+ return 0;
+
+ scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
+ if (scn)
+ data = elf_getdata(scn, NULL);
+ if (!scn || !data) {
+		pr_warning("failed to get Elf_Data from map section %d (%s)\n",
+			   obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
+ return -EINVAL;
+ }
+
+ nr_types = btf__get_nr_types(obj->btf);
+ for (i = 1; i <= nr_types; i++) {
+ t = btf__type_by_id(obj->btf, i);
+ if (!btf_is_datasec(t))
+ continue;
+ name = btf__name_by_offset(obj->btf, t->name_off);
+ if (strcmp(name, MAPS_ELF_SEC) == 0) {
+ sec = t;
+ break;
+ }
+ }
+
+ if (!sec) {
+ pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
+ return -ENOENT;
+ }
+
+ vlen = btf_vlen(sec);
+ for (i = 0; i < vlen; i++) {
+ err = bpf_object__init_user_btf_map(obj, sec, i,
+ obj->efile.btf_maps_shndx,
+ data, strict);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bpf_object__init_maps(struct bpf_object *obj, int flags)
+{
+ bool strict = !(flags & MAPS_RELAX_COMPAT);
+ int err;
+
+ err = bpf_object__init_user_maps(obj, strict);
+ if (err)
+ return err;
+
+ err = bpf_object__init_user_btf_maps(obj, strict);
+ if (err)
+ return err;
+
+ err = bpf_object__init_global_data_maps(obj);
+ if (err)
+ return err;
+
+ if (obj->nr_maps) {
qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
compare_bpf_map);
- return ret;
+ }
+ return 0;
}
static bool section_have_execinstr(struct bpf_object *obj, int idx)
@@ -1033,24 +1366,27 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
struct btf *btf = obj->btf;
struct btf_type *t;
int i, j, vlen;
- __u16 kind;
if (!obj->btf || (has_func && has_datasec))
return;
for (i = 1; i <= btf__get_nr_types(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
- kind = BTF_INFO_KIND(t->info);
- if (!has_datasec && kind == BTF_KIND_VAR) {
+ if (!has_datasec && btf_is_var(t)) {
/* replace VAR with INT */
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
- t->size = sizeof(int);
- *(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
- } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
+ /*
+ * using size = 1 is the safest choice, 4 will be too
+ * big and cause kernel BTF validation failure if
+ * original variable took less than 4 bytes
+ */
+ t->size = 1;
+ *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
+ } else if (!has_datasec && btf_is_datasec(t)) {
/* replace DATASEC with STRUCT */
- struct btf_var_secinfo *v = (void *)(t + 1);
- struct btf_member *m = (void *)(t + 1);
+ const struct btf_var_secinfo *v = btf_var_secinfos(t);
+ struct btf_member *m = btf_members(t);
struct btf_type *vt;
char *name;
@@ -1061,7 +1397,7 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
name++;
}
- vlen = BTF_INFO_VLEN(t->info);
+ vlen = btf_vlen(t);
t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
for (j = 0; j < vlen; j++, v++, m++) {
/* order of field assignments is important */
@@ -1071,12 +1407,12 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
vt = (void *)btf__type_by_id(btf, v->type);
m->name_off = vt->name_off;
}
- } else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
+ } else if (!has_func && btf_is_func_proto(t)) {
/* replace FUNC_PROTO with ENUM */
- vlen = BTF_INFO_VLEN(t->info);
+ vlen = btf_vlen(t);
t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
t->size = sizeof(__u32); /* kernel enforced */
- } else if (!has_func && kind == BTF_KIND_FUNC) {
+ } else if (!has_func && btf_is_func(t)) {
/* replace FUNC with TYPEDEF */
t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
}
@@ -1094,6 +1430,92 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
}
}
+static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
+{
+ return obj->efile.btf_maps_shndx >= 0;
+}
+
+static int bpf_object__init_btf(struct bpf_object *obj,
+ Elf_Data *btf_data,
+ Elf_Data *btf_ext_data)
+{
+ bool btf_required = bpf_object__is_btf_mandatory(obj);
+ int err = 0;
+
+ if (btf_data) {
+ obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
+		if (IS_ERR(obj->btf)) {
+			err = PTR_ERR(obj->btf);
+			pr_warning("Error loading ELF section %s: %d.\n",
+				   BTF_ELF_SEC, err);
+ goto out;
+ }
+ err = btf__finalize_data(obj, obj->btf);
+ if (err) {
+ pr_warning("Error finalizing %s: %d.\n",
+ BTF_ELF_SEC, err);
+ goto out;
+ }
+ }
+ if (btf_ext_data) {
+ if (!obj->btf) {
+			pr_debug("Ignoring ELF section %s because the %s section it depends on is not found.\n",
+ BTF_EXT_ELF_SEC, BTF_ELF_SEC);
+ goto out;
+ }
+ obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
+ btf_ext_data->d_size);
+ if (IS_ERR(obj->btf_ext)) {
+			pr_warning("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
+ BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
+ obj->btf_ext = NULL;
+ goto out;
+ }
+ }
+out:
+ if (err || IS_ERR(obj->btf)) {
+ if (btf_required)
+ err = err ? : PTR_ERR(obj->btf);
+ else
+ err = 0;
+ if (!IS_ERR_OR_NULL(obj->btf))
+ btf__free(obj->btf);
+ obj->btf = NULL;
+ }
+ if (btf_required && !obj->btf) {
+ pr_warning("BTF is required, but is missing or corrupted.\n");
+ return err == 0 ? -ENOENT : err;
+ }
+ return 0;
+}
+
+static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
+{
+ int err = 0;
+
+ if (!obj->btf)
+ return 0;
+
+ bpf_object__sanitize_btf(obj);
+ bpf_object__sanitize_btf_ext(obj);
+
+ err = btf__load(obj->btf);
+ if (err) {
+ pr_warning("Error loading %s into kernel: %d.\n",
+ BTF_ELF_SEC, err);
+ btf__free(obj->btf);
+ obj->btf = NULL;
+ /* btf_ext can't exist without btf, so free it as well */
+ if (obj->btf_ext) {
+ btf_ext__free(obj->btf_ext);
+ obj->btf_ext = NULL;
+ }
+
+ if (bpf_object__is_btf_mandatory(obj))
+ return err;
+ }
+ return 0;
+}
+
static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
Elf *elf = obj->efile.elf;
@@ -1105,8 +1527,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
- pr_warning("failed to get e_shstrndx from %s\n",
- obj->path);
+ pr_warning("failed to get e_shstrndx from %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -1119,24 +1540,21 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warning("failed to get section(%d) header from %s\n",
idx, obj->path);
- err = -LIBBPF_ERRNO__FORMAT;
- goto out;
+ return -LIBBPF_ERRNO__FORMAT;
}
name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!name) {
pr_warning("failed to get section(%d) name from %s\n",
idx, obj->path);
- err = -LIBBPF_ERRNO__FORMAT;
- goto out;
+ return -LIBBPF_ERRNO__FORMAT;
}
data = elf_getdata(scn, 0);
if (!data) {
pr_warning("failed to get section(%d) data from %s(%s)\n",
idx, name, obj->path);
- err = -LIBBPF_ERRNO__FORMAT;
- goto out;
+ return -LIBBPF_ERRNO__FORMAT;
}
pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size,
@@ -1147,12 +1565,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
err = bpf_object__init_license(obj,
data->d_buf,
data->d_size);
+ if (err)
+ return err;
} else if (strcmp(name, "version") == 0) {
err = bpf_object__init_kversion(obj,
data->d_buf,
data->d_size);
+ if (err)
+ return err;
} else if (strcmp(name, "maps") == 0) {
obj->efile.maps_shndx = idx;
+ } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
+ obj->efile.btf_maps_shndx = idx;
} else if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
@@ -1161,11 +1585,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
obj->path);
- err = -LIBBPF_ERRNO__FORMAT;
- } else {
- obj->efile.symbols = data;
- obj->efile.strtabidx = sh.sh_link;
+ return -LIBBPF_ERRNO__FORMAT;
}
+ obj->efile.symbols = data;
+ obj->efile.strtabidx = sh.sh_link;
} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
if (sh.sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0)
@@ -1179,6 +1602,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_warning("failed to alloc program %s (%s): %s",
name, obj->path, cp);
+ return err;
}
} else if (strcmp(name, ".data") == 0) {
obj->efile.data = data;
@@ -1190,8 +1614,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_debug("skip section(%d) %s\n", idx, name);
}
} else if (sh.sh_type == SHT_REL) {
+ int nr_reloc = obj->efile.nr_reloc;
void *reloc = obj->efile.reloc;
- int nr_reloc = obj->efile.nr_reloc + 1;
int sec = sh.sh_info; /* points to other section */
/* Only do relo for section with exec instructions */
@@ -1201,79 +1625,37 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
continue;
}
- reloc = reallocarray(reloc, nr_reloc,
+ reloc = reallocarray(reloc, nr_reloc + 1,
sizeof(*obj->efile.reloc));
if (!reloc) {
pr_warning("realloc failed\n");
- err = -ENOMEM;
- } else {
- int n = nr_reloc - 1;
+ return -ENOMEM;
+ }
- obj->efile.reloc = reloc;
- obj->efile.nr_reloc = nr_reloc;
+ obj->efile.reloc = reloc;
+ obj->efile.nr_reloc++;
- obj->efile.reloc[n].shdr = sh;
- obj->efile.reloc[n].data = data;
- }
+ obj->efile.reloc[nr_reloc].shdr = sh;
+ obj->efile.reloc[nr_reloc].data = data;
} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
obj->efile.bss = data;
obj->efile.bss_shndx = idx;
} else {
pr_debug("skip section(%d) %s\n", idx, name);
}
- if (err)
- goto out;
}
if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
pr_warning("Corrupted ELF file: index of strtab invalid\n");
- return LIBBPF_ERRNO__FORMAT;
- }
- if (btf_data) {
- obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
- if (IS_ERR(obj->btf)) {
- pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
- BTF_ELF_SEC, PTR_ERR(obj->btf));
- obj->btf = NULL;
- } else {
- err = btf__finalize_data(obj, obj->btf);
- if (!err) {
- bpf_object__sanitize_btf(obj);
- err = btf__load(obj->btf);
- }
- if (err) {
- pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
- BTF_ELF_SEC, err);
- btf__free(obj->btf);
- obj->btf = NULL;
- err = 0;
- }
- }
- }
- if (btf_ext_data) {
- if (!obj->btf) {
- pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
- BTF_EXT_ELF_SEC, BTF_ELF_SEC);
- } else {
- obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size);
- if (IS_ERR(obj->btf_ext)) {
- pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
- BTF_EXT_ELF_SEC,
- PTR_ERR(obj->btf_ext));
- obj->btf_ext = NULL;
- } else {
- bpf_object__sanitize_btf_ext(obj);
- }
- }
+ return -LIBBPF_ERRNO__FORMAT;
}
- if (bpf_object__has_maps(obj)) {
+ err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
+ if (!err)
err = bpf_object__init_maps(obj, flags);
- if (err)
- goto out;
- }
- err = bpf_object__init_prog_names(obj);
-out:
+ if (!err)
+ err = bpf_object__sanitize_and_load_btf(obj);
+ if (!err)
+ err = bpf_object__init_prog_names(obj);
return err;
}
@@ -1292,7 +1674,8 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
}
struct bpf_program *
-bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
+bpf_object__find_program_by_title(const struct bpf_object *obj,
+ const char *title)
{
struct bpf_program *pos;
@@ -1314,7 +1697,8 @@ static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
int shndx)
{
- return shndx == obj->efile.maps_shndx;
+ return shndx == obj->efile.maps_shndx ||
+ shndx == obj->efile.btf_maps_shndx;
}
static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
@@ -1347,8 +1731,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
size_t nr_maps = obj->nr_maps;
int i, nrels;
- pr_debug("collecting relocating info for: '%s'\n",
- prog->section_name);
+ pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
nrels = shdr->sh_size / shdr->sh_entsize;
prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
@@ -1359,23 +1742,21 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
prog->nr_reloc = nrels;
for (i = 0; i < nrels; i++) {
- GElf_Sym sym;
- GElf_Rel rel;
- unsigned int insn_idx;
- unsigned int shdr_idx;
struct bpf_insn *insns = prog->insns;
enum libbpf_map_type type;
+ unsigned int insn_idx;
+ unsigned int shdr_idx;
const char *name;
size_t map_idx;
+ GElf_Sym sym;
+ GElf_Rel rel;
if (!gelf_getrel(data, i, &rel)) {
pr_warning("relocation: failed to get %d reloc\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols,
- GELF_R_SYM(rel.r_info),
- &sym)) {
+ if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
pr_warning("relocation: symbol %"PRIx64" not found\n",
GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
@@ -1389,15 +1770,22 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
(long long) sym.st_value, sym.st_name, name);
shdr_idx = sym.st_shndx;
+ insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+ pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
+ insn_idx, shdr_idx);
+
+ if (shdr_idx >= SHN_LORESERVE) {
+ pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
+ name, shdr_idx, insn_idx,
+ insns[insn_idx].code);
+ return -LIBBPF_ERRNO__RELOC;
+ }
if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
prog->section_name, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
- pr_debug("relocation: insn_idx=%u\n", insn_idx);
-
if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
pr_warning("incorrect bpf_call opcode\n");
@@ -1436,16 +1824,19 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
if (maps[map_idx].libbpf_type != type)
continue;
if (type != LIBBPF_MAP_UNSPEC ||
- (type == LIBBPF_MAP_UNSPEC &&
- maps[map_idx].offset == sym.st_value)) {
- pr_debug("relocation: find map %zd (%s) for insn %u\n",
- map_idx, maps[map_idx].name, insn_idx);
+ (maps[map_idx].sec_idx == sym.st_shndx &&
+ maps[map_idx].sec_offset == sym.st_value)) {
+ pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
+ map_idx, maps[map_idx].name,
+ maps[map_idx].sec_idx,
+ maps[map_idx].sec_offset,
+ insn_idx);
break;
}
}
if (map_idx >= nr_maps) {
- pr_warning("bpf relocation: map_idx %d large than %d\n",
+ pr_warning("bpf relocation: map_idx %d larger than %d\n",
(int)map_idx, (int)nr_maps - 1);
return -LIBBPF_ERRNO__RELOC;
}
@@ -1459,14 +1850,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
return 0;
}
-static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
+static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
struct bpf_map_def *def = &map->def;
__u32 key_type_id = 0, value_type_id = 0;
int ret;
+ /* if it's BTF-defined map, we don't need to search for type IDs */
+ if (map->sec_idx == obj->efile.btf_maps_shndx)
+ return 0;
+
if (!bpf_map__is_internal(map)) {
- ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
+ ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
def->value_size, &key_type_id,
&value_type_id);
} else {
@@ -1474,7 +1869,7 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
* LLVM annotates global data differently in BTF, that is,
* only as '.data', '.bss' or '.rodata'.
*/
- ret = btf__find_by_name(btf,
+ ret = btf__find_by_name(obj->btf,
libbpf_type_to_btf_name[map->libbpf_type]);
}
if (ret < 0)
@@ -1646,14 +2041,16 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
/* FUNC x */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
};
- int res;
+ int btf_fd;
- res = libbpf__probe_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (res < 0)
- return res;
- if (res > 0)
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs));
+ if (btf_fd >= 0) {
obj->caps.btf_func = 1;
+ close(btf_fd);
+ return 1;
+ }
+
return 0;
}
@@ -1671,14 +2068,16 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
};
- int res;
+ int btf_fd;
- res = libbpf__probe_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (res < 0)
- return res;
- if (res > 0)
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs));
+ if (btf_fd >= 0) {
obj->caps.btf_datasec = 1;
+ close(btf_fd);
+ return 1;
+ }
+
return 0;
}
@@ -1696,7 +2095,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
ret = probe_fn[i](obj);
if (ret < 0)
- return ret;
+ pr_debug("Probe #%d failed with %d.\n", i, ret);
}
return 0;
@@ -1734,6 +2133,7 @@ static int
bpf_object__create_maps(struct bpf_object *obj)
{
struct bpf_create_map_attr create_attr = {};
+ int nr_cpus = 0;
unsigned int i;
int err;
@@ -1756,7 +2156,22 @@ bpf_object__create_maps(struct bpf_object *obj)
create_attr.map_flags = def->map_flags;
create_attr.key_size = def->key_size;
create_attr.value_size = def->value_size;
- create_attr.max_entries = def->max_entries;
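+		/*
+		 * PERF_EVENT_ARRAY maps declared without an explicit size
+		 * default to one entry per possible CPU.
+		 */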
+ if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
+ !def->max_entries) {
+ if (!nr_cpus)
+ nr_cpus = libbpf_num_possible_cpus();
+ if (nr_cpus < 0) {
+ pr_warning("failed to determine number of system CPUs: %d\n",
+ nr_cpus);
+ err = nr_cpus;
+ goto err_out;
+ }
+ pr_debug("map '%s': setting size to %d\n",
+ map->name, nr_cpus);
+ create_attr.max_entries = nr_cpus;
+ } else {
+ create_attr.max_entries = def->max_entries;
+ }
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -1764,17 +2179,19 @@ bpf_object__create_maps(struct bpf_object *obj)
map->inner_map_fd >= 0)
create_attr.inner_map_fd = map->inner_map_fd;
- if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
+ if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
create_attr.btf_fd = btf__fd(obj->btf);
create_attr.btf_key_type_id = map->btf_key_type_id;
create_attr.btf_value_type_id = map->btf_value_type_id;
}
*pfd = bpf_create_map_xattr(&create_attr);
- if (*pfd < 0 && create_attr.btf_key_type_id) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ if (*pfd < 0 && (create_attr.btf_key_type_id ||
+ create_attr.btf_value_type_id)) {
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
- map->name, cp, errno);
+ map->name, cp, err);
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -1786,11 +2203,11 @@ bpf_object__create_maps(struct bpf_object *obj)
if (*pfd < 0) {
size_t j;
- err = *pfd;
+ err = -errno;
err_out:
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("failed to create map (name: '%s'): %s\n",
- map->name, cp);
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
+ pr_warning("failed to create map (name: '%s'): %s(%d)\n",
+ map->name, cp, err);
for (j = 0; j < i; j++)
zclose(obj->maps[j].fd);
return err;
@@ -1804,7 +2221,7 @@ err_out:
}
}
- pr_debug("create map %s: fd=%d\n", map->name, *pfd);
+ pr_debug("created map %s: fd=%d\n", map->name, *pfd);
}
return 0;
@@ -1825,18 +2242,14 @@ check_btf_ext_reloc_err(struct bpf_program *prog, int err,
if (btf_prog_info) {
/*
* Some info has already been found but has problem
- * in the last btf_ext reloc. Must have to error
- * out.
+		 * in the last btf_ext reloc. We must error out.
*/
pr_warning("Error in relocating %s for sec %s.\n",
info_name, prog->section_name);
return err;
}
- /*
- * Have problem loading the very first info. Ignore
- * the rest.
- */
+	/* Problem loading the very first info; ignore the rest. */
pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
info_name, prog->section_name, info_name);
return 0;
@@ -1880,12 +2293,897 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
}
- if (!insn_offset)
- prog->btf_fd = btf__fd(obj->btf);
+ return 0;
+}
+
+#define BPF_CORE_SPEC_MAX_LEN 64
+
+/* represents BPF CO-RE field or array element accessor */
+struct bpf_core_accessor {
+ __u32 type_id; /* struct/union type or array element type */
+ __u32 idx; /* field index or array index */
+ const char *name; /* field name or NULL for array accessor */
+};
+
+struct bpf_core_spec {
+ const struct btf *btf;
+ /* high-level spec: named fields and array indices only */
+ struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
+ /* high-level spec length */
+ int len;
+ /* raw, low-level spec: 1-to-1 with accessor spec string */
+ int raw_spec[BPF_CORE_SPEC_MAX_LEN];
+ /* raw spec length */
+ int raw_len;
+ /* field byte offset represented by spec */
+ __u32 offset;
+};
+
+static bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+/*
+ * Turn bpf_offset_reloc into a low- and high-level spec representation,
+ * validating correctness along the way, as well as calculating resulting
+ * field offset (in bytes), specified by accessor string. Low-level spec
+ * captures every single level of nestedness, including traversing anonymous
+ * struct/union members. High-level one only captures semantically meaningful
+ * "turning points": named fields and array indicies.
+ * E.g., for this case:
+ *
+ * struct sample {
+ * int __unimportant;
+ * struct {
+ * int __1;
+ * int __2;
+ * int a[7];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ *
+ *   int *x = &s->a[3]; // access string = '0:1:2:3'
+ *
+ * Low-level spec has 1:1 mapping with each element of access string (it's
+ * just a parsed access string representation): [0, 1, 2, 3].
+ *
+ * High-level spec will capture only 3 points:
+ * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
+ * - field 'a' access (corresponds to '2' in low-level spec);
+ * - array element #3 access (corresponds to '3' in low-level spec).
+ *
+ */
+static int bpf_core_spec_parse(const struct btf *btf,
+ __u32 type_id,
+ const char *spec_str,
+ struct bpf_core_spec *spec)
+{
+ int access_idx, parsed_len, i;
+ const struct btf_type *t;
+ const char *name;
+ __u32 id;
+ __s64 sz;
+
+ if (str_is_empty(spec_str) || *spec_str == ':')
+ return -EINVAL;
+
+ memset(spec, 0, sizeof(*spec));
+ spec->btf = btf;
+
+ /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
+ while (*spec_str) {
+ if (*spec_str == ':')
+ ++spec_str;
+ if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
+ return -EINVAL;
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+ spec_str += parsed_len;
+ spec->raw_spec[spec->raw_len++] = access_idx;
+ }
+
+ if (spec->raw_len == 0)
+ return -EINVAL;
+
+ /* first spec value is always reloc type array index */
+ t = skip_mods_and_typedefs(btf, type_id, &id);
+ if (!t)
+ return -EINVAL;
+
+ access_idx = spec->raw_spec[0];
+ spec->spec[0].type_id = id;
+ spec->spec[0].idx = access_idx;
+ spec->len++;
+
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+ spec->offset = access_idx * sz;
+
+ for (i = 1; i < spec->raw_len; i++) {
+ t = skip_mods_and_typedefs(btf, id, &id);
+ if (!t)
+ return -EINVAL;
+
+ access_idx = spec->raw_spec[i];
+
+ if (btf_is_composite(t)) {
+ const struct btf_member *m;
+ __u32 offset;
+
+ if (access_idx >= btf_vlen(t))
+ return -EINVAL;
+ if (btf_member_bitfield_size(t, access_idx))
+ return -EINVAL;
+
+ offset = btf_member_bit_offset(t, access_idx);
+ if (offset % 8)
+ return -EINVAL;
+ spec->offset += offset / 8;
+
+ m = btf_members(t) + access_idx;
+ if (m->name_off) {
+ name = btf__name_by_offset(btf, m->name_off);
+ if (str_is_empty(name))
+ return -EINVAL;
+
+ spec->spec[spec->len].type_id = id;
+ spec->spec[spec->len].idx = access_idx;
+ spec->spec[spec->len].name = name;
+ spec->len++;
+ }
+
+ id = m->type;
+ } else if (btf_is_array(t)) {
+ const struct btf_array *a = btf_array(t);
+
+ t = skip_mods_and_typedefs(btf, a->type, &id);
+ if (!t || access_idx >= a->nelems)
+ return -EINVAL;
+
+ spec->spec[spec->len].type_id = id;
+ spec->spec[spec->len].idx = access_idx;
+ spec->len++;
+
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+ spec->offset += access_idx * sz;
+ } else {
+ pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
+ type_id, spec_str, i, id, btf_kind(t));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static bool bpf_core_is_flavor_sep(const char *s)
+{
+ /* check X___Y name pattern, where X and Y are not underscores */
+ return s[0] != '_' && /* X */
+ s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
+ s[4] != '_'; /* Y */
+}
+
+/* Given 'some_struct_name___with_flavor' return the length of a name prefix
+ * before last triple underscore. Struct name part after last triple
+ * underscore is ignored by BPF CO-RE relocation during relocation matching.
+ */
+static size_t bpf_core_essential_name_len(const char *name)
+{
+ size_t n = strlen(name);
+ int i;
+
+ for (i = n - 5; i >= 0; i--) {
+ if (bpf_core_is_flavor_sep(name + i))
+ return i + 1;
+ }
+ return n;
+}
+
+/* dynamically sized list of type IDs */
+struct ids_vec {
+ __u32 *data;
+ int len;
+};
+
+static void bpf_core_free_cands(struct ids_vec *cand_ids)
+{
+ free(cand_ids->data);
+ free(cand_ids);
+}
+
+static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
+ __u32 local_type_id,
+ const struct btf *targ_btf)
+{
+ size_t local_essent_len, targ_essent_len;
+ const char *local_name, *targ_name;
+ const struct btf_type *t;
+ struct ids_vec *cand_ids;
+ __u32 *new_ids;
+ int i, err, n;
+
+ t = btf__type_by_id(local_btf, local_type_id);
+ if (!t)
+ return ERR_PTR(-EINVAL);
+
+ local_name = btf__name_by_offset(local_btf, t->name_off);
+ if (str_is_empty(local_name))
+ return ERR_PTR(-EINVAL);
+ local_essent_len = bpf_core_essential_name_len(local_name);
+
+ cand_ids = calloc(1, sizeof(*cand_ids));
+ if (!cand_ids)
+ return ERR_PTR(-ENOMEM);
+
+ n = btf__get_nr_types(targ_btf);
+ for (i = 1; i <= n; i++) {
+ t = btf__type_by_id(targ_btf, i);
+ targ_name = btf__name_by_offset(targ_btf, t->name_off);
+ if (str_is_empty(targ_name))
+ continue;
+
+ targ_essent_len = bpf_core_essential_name_len(targ_name);
+ if (targ_essent_len != local_essent_len)
+ continue;
+
+ if (strncmp(local_name, targ_name, local_essent_len) == 0) {
+ pr_debug("[%d] %s: found candidate [%d] %s\n",
+ local_type_id, local_name, i, targ_name);
+			new_ids = realloc(cand_ids->data,
+					  (cand_ids->len + 1) * sizeof(*new_ids));
+ if (!new_ids) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ cand_ids->data = new_ids;
+ cand_ids->data[cand_ids->len++] = i;
+ }
+ }
+ return cand_ids;
+err_out:
+ bpf_core_free_cands(cand_ids);
+ return ERR_PTR(err);
+}
+
+/* Check two types for compatibility, skipping const/volatile/restrict and
+ * typedefs, to ensure we are relocating offset to the compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible;
+ * - any two PTRs are always compatible;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and bitness should match, signedness is ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ */
+static int bpf_core_fields_are_compat(const struct btf *local_btf,
+ __u32 local_id,
+ const struct btf *targ_btf,
+ __u32 targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+
+recur:
+ local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_type || !targ_type)
+ return -EINVAL;
+
+ if (btf_is_composite(local_type) && btf_is_composite(targ_type))
+ return 1;
+ if (btf_kind(local_type) != btf_kind(targ_type))
+ return 0;
+
+ switch (btf_kind(local_type)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_PTR:
+ return 1;
+ case BTF_KIND_ENUM:
+ return local_type->size == targ_type->size;
+ case BTF_KIND_INT:
+ return btf_int_offset(local_type) == 0 &&
+ btf_int_offset(targ_type) == 0 &&
+ local_type->size == targ_type->size &&
+ btf_int_bits(local_type) == btf_int_bits(targ_type);
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+ goto recur;
+ default:
+ pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
+ btf_kind(local_type), local_id, targ_id);
+ return 0;
+ }
+}
+
+/*
+ * Given single high-level named field accessor in local type, find
+ * corresponding high-level accessor for a target type. Along the way,
+ * maintain low-level spec for target as well. Also keep updating target
+ * offset.
+ *
+ * Searching is performed through recursive exhaustive enumeration of all
+ * fields of a struct/union. If there are any anonymous (embedded)
+ * structs/unions, they are recursively searched as well. If field with
+ * desired name is found, check compatibility between local and target types,
+ * before returning result.
+ *
+ * 1 is returned, if field is found.
+ * 0 is returned if no compatible field is found.
+ * <0 is returned on error.
+ */
+static int bpf_core_match_member(const struct btf *local_btf,
+ const struct bpf_core_accessor *local_acc,
+ const struct btf *targ_btf,
+ __u32 targ_id,
+ struct bpf_core_spec *spec,
+ __u32 *next_targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+ const struct btf_member *local_member, *m;
+ const char *local_name, *targ_name;
+ __u32 local_id;
+ int i, n, found;
+
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!targ_type)
+ return -EINVAL;
+ if (!btf_is_composite(targ_type))
+ return 0;
+
+ local_id = local_acc->type_id;
+ local_type = btf__type_by_id(local_btf, local_id);
+ local_member = btf_members(local_type) + local_acc->idx;
+ local_name = btf__name_by_offset(local_btf, local_member->name_off);
+
+ n = btf_vlen(targ_type);
+ m = btf_members(targ_type);
+ for (i = 0; i < n; i++, m++) {
+ __u32 offset;
+
+ /* bitfield relocations not supported */
+ if (btf_member_bitfield_size(targ_type, i))
+ continue;
+ offset = btf_member_bit_offset(targ_type, i);
+ if (offset % 8)
+ continue;
+
+ /* too deep struct/union/array nesting */
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ /* speculate this member will be the good one */
+ spec->offset += offset / 8;
+ spec->raw_spec[spec->raw_len++] = i;
+
+ targ_name = btf__name_by_offset(targ_btf, m->name_off);
+ if (str_is_empty(targ_name)) {
+ /* embedded struct/union, we need to go deeper */
+ found = bpf_core_match_member(local_btf, local_acc,
+ targ_btf, m->type,
+ spec, next_targ_id);
+ if (found) /* either found or error */
+ return found;
+ } else if (strcmp(local_name, targ_name) == 0) {
+ /* matching named field */
+ struct bpf_core_accessor *targ_acc;
+
+ targ_acc = &spec->spec[spec->len++];
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = i;
+ targ_acc->name = targ_name;
+
+ *next_targ_id = m->type;
+ found = bpf_core_fields_are_compat(local_btf,
+ local_member->type,
+ targ_btf, m->type);
+ if (!found)
+ spec->len--; /* pop accessor */
+ return found;
+ }
+ /* member turned out not to be what we looked for */
+ spec->offset -= offset / 8;
+ spec->raw_len--;
+ }
+
+ return 0;
+}
+
+/*
+ * Try to match local spec to a target type and, if successful, produce full
+ * target spec (high-level, low-level + offset).
+ */
+static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
+ const struct btf *targ_btf, __u32 targ_id,
+ struct bpf_core_spec *targ_spec)
+{
+ const struct btf_type *targ_type;
+ const struct bpf_core_accessor *local_acc;
+ struct bpf_core_accessor *targ_acc;
+	int i, sz, matched;
+
+	memset(targ_spec, 0, sizeof(*targ_spec));
+ targ_spec->btf = targ_btf;
+
+ local_acc = &local_spec->spec[0];
+ targ_acc = &targ_spec->spec[0];
+
+ for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
+ targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
+ &targ_id);
+ if (!targ_type)
+ return -EINVAL;
+
+ if (local_acc->name) {
+ matched = bpf_core_match_member(local_spec->btf,
+ local_acc,
+ targ_btf, targ_id,
+ targ_spec, &targ_id);
+ if (matched <= 0)
+ return matched;
+ } else {
+			/* for i=0, targ_id is already treated as array element
+			 * type (because it's the original struct); for others
+			 * we should find array element type first
+ */
+ if (i > 0) {
+ const struct btf_array *a;
+
+ if (!btf_is_array(targ_type))
+ return 0;
+
+ a = btf_array(targ_type);
+ if (local_acc->idx >= a->nelems)
+ return 0;
+ if (!skip_mods_and_typedefs(targ_btf, a->type,
+ &targ_id))
+ return -EINVAL;
+ }
+
+ /* too deep struct/union/array nesting */
+ if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = local_acc->idx;
+ targ_acc->name = NULL;
+ targ_spec->len++;
+ targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
+ targ_spec->raw_len++;
+
+ sz = btf__resolve_size(targ_btf, targ_id);
+ if (sz < 0)
+ return sz;
+ targ_spec->offset += local_acc->idx * sz;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * Patch relocatable BPF instruction.
+ * Expected insn->imm value is provided for validation, as well as the new
+ * relocated value.
+ *
+ * Currently two kinds of BPF instructions are supported:
+ * 1. rX = <imm> (assignment with immediate operand);
+ * 2. rX += <imm> (arithmetic operations with immediate operand).
+ *
+ * If actual insn->imm value is wrong, bail out.
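+ *
+ * E.g. (an illustrative sketch; the offsets are made up): a field access
+ * compiled against local BTF as "r2 = 40" may be patched to "r2 = 44" when
+ * the target kernel's BTF places that field at byte offset 44.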
+ */
+static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
+ __u32 orig_off, __u32 new_off)
+{
+ struct bpf_insn *insn;
+ int insn_idx;
+ __u8 class;
+
+ if (insn_off % sizeof(struct bpf_insn))
+ return -EINVAL;
+ insn_idx = insn_off / sizeof(struct bpf_insn);
+
+ insn = &prog->insns[insn_idx];
+ class = BPF_CLASS(insn->code);
+
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (BPF_SRC(insn->code) != BPF_K)
+ return -EINVAL;
+ if (insn->imm != orig_off)
+ return -EINVAL;
+ insn->imm = new_off;
+ pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
+ bpf_program__title(prog, false),
+ insn_idx, orig_off, new_off);
+ } else {
+ pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
+ bpf_program__title(prog, false),
+ insn_idx, insn->code, insn->src_reg, insn->dst_reg,
+ insn->off, insn->imm);
+ return -EINVAL;
+ }
return 0;
}
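+
+/*
+ * Load a raw BTF image, e.g. the one exposed at /sys/kernel/btf/vmlinux,
+ * directly from a file on disk.
+ */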
+static struct btf *btf_load_raw(const char *path)
+{
+ struct btf *btf;
+ size_t read_cnt;
+ struct stat st;
+ void *data;
+ FILE *f;
+
+ if (stat(path, &st))
+ return ERR_PTR(-errno);
+
+ data = malloc(st.st_size);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ f = fopen(path, "rb");
+ if (!f) {
+ btf = ERR_PTR(-errno);
+ goto cleanup;
+ }
+
+ read_cnt = fread(data, 1, st.st_size, f);
+ fclose(f);
+ if (read_cnt < st.st_size) {
+ btf = ERR_PTR(-EBADF);
+ goto cleanup;
+ }
+
+ btf = btf__new(data, read_cnt);
+
+cleanup:
+ free(data);
+ return btf;
+}
+
+/*
+ * Probe a few well-known locations for vmlinux kernel image and try to load BTF
+ * data out of it to use for target BTF.
+ */
+static struct btf *bpf_core_find_kernel_btf(void)
+{
+ struct {
+ const char *path_fmt;
+ bool raw_btf;
+ } locations[] = {
+ /* try canonical vmlinux BTF through sysfs first */
+ { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
+ /* fall back to trying to find vmlinux ELF on disk otherwise */
+ { "/boot/vmlinux-%1$s" },
+ { "/lib/modules/%1$s/vmlinux-%1$s" },
+ { "/lib/modules/%1$s/build/vmlinux" },
+ { "/usr/lib/modules/%1$s/kernel/vmlinux" },
+ { "/usr/lib/debug/boot/vmlinux-%1$s" },
+ { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
+ { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
+ };
+ char path[PATH_MAX + 1];
+ struct utsname buf;
+ struct btf *btf;
+ int i;
+
+ uname(&buf);
+
+ for (i = 0; i < ARRAY_SIZE(locations); i++) {
+ snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
+
+ if (access(path, R_OK))
+ continue;
+
+ if (locations[i].raw_btf)
+ btf = btf_load_raw(path);
+ else
+ btf = btf__parse_elf(path, NULL);
+
+ pr_debug("loading kernel BTF '%s': %ld\n",
+ path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
+ if (IS_ERR(btf))
+ continue;
+
+ return btf;
+ }
+
+ pr_warning("failed to find valid kernel BTF\n");
+ return ERR_PTR(-ESRCH);
+}
+
+/* Output spec definition in the format:
+ * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
+ * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
+ */
+static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
+{
+ const struct btf_type *t;
+ const char *s;
+ __u32 type_id;
+ int i;
+
+ type_id = spec->spec[0].type_id;
+ t = btf__type_by_id(spec->btf, type_id);
+ s = btf__name_by_offset(spec->btf, t->name_off);
+ libbpf_print(level, "[%u] %s + ", type_id, s);
+
+ for (i = 0; i < spec->raw_len; i++)
+ libbpf_print(level, "%d%s", spec->raw_spec[i],
+ i == spec->raw_len - 1 ? " => " : ":");
+
+ libbpf_print(level, "%u @ &x", spec->offset);
+
+ for (i = 0; i < spec->len; i++) {
+ if (spec->spec[i].name)
+ libbpf_print(level, ".%s", spec->spec[i].name);
+ else
+ libbpf_print(level, "[%u]", spec->spec[i].idx);
+	}
+}
+
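+/*
+ * The candidate cache maps a local root type ID to the list of matching
+ * target type IDs; the u32 ID itself serves as the hash key, so identity
+ * hash and equality callbacks suffice.
+ */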
+static size_t bpf_core_hash_fn(const void *key, void *ctx)
+{
+ return (size_t)key;
+}
+
+static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
+{
+ return k1 == k2;
+}
+
+static void *u32_as_hash_key(__u32 x)
+{
+ return (void *)(uintptr_t)x;
+}
+
+/*
+ * CO-RE relocate single instruction.
+ *
+ * The outline and important points of the algorithm:
+ * 1. For given local type, find corresponding candidate target types.
+ * Candidate type is a type with the same "essential" name, ignoring
+ * everything after last triple underscore (___). E.g., `sample`,
+ * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
+ * for each other. Names with triple underscore are referred to as
+ *    "flavors" and are useful, among other things, to allow specifying or
+ *    supporting incompatible variations of the same kernel struct, which
+ * might differ between different kernel versions and/or build
+ * configurations.
+ *
+ * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
+ *    converter, when deduplicated BTF of a kernel still contains more than
+ *    one distinct type with the same name. In that case, ___2, ___3, etc
+ *    are appended starting from the second name conflict. But struct flavors
+ *    are also useful when defined "locally", in a BPF program, to extract
+ *    the same data from incompatible changes between different kernel
+ *    versions/configurations. For instance, to handle field renames between
+ *    kernel versions, one can use two flavors of the struct name with the
+ *    same common name and use conditional relocations to extract that field,
+ *    depending on target kernel version (see the sketch after this comment).
+ * 2. For each candidate type, try to match local specification to this
+ * candidate target type. Matching involves finding corresponding
+ * high-level spec accessors, meaning that all named fields should match,
+ * as well as all array accesses should be within the actual bounds. Also,
+ * types should be compatible (see bpf_core_fields_are_compat for details).
+ * 3. It is supported and expected that there might be multiple flavors
+ * matching the spec. As long as all the specs resolve to the same set of
+ *    offsets across all candidates, there is no error. If there is any
+ *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
+ *    imperfection of BTF deduplication, which can cause slight duplication of
+ * the same BTF type, if some directly or indirectly referenced (by
+ * pointer) type gets resolved to different actual types in different
+ * object files. If such situation occurs, deduplicated BTF will end up
+ * with two (or more) structurally identical types, which differ only in
+ * types they refer to through pointer. This should be OK in most cases and
+ * is not an error.
+ * 4. Candidate types search is performed by linearly scanning through all
+ * types in target BTF. It is anticipated that this is overall more
+ * efficient memory-wise and not significantly worse (if not better)
+ * CPU-wise compared to prebuilding a map from all local type names to
+ * a list of candidate type names. It's also sped up by caching resolved
+ * list of matching candidates per each local "root" type ID, that has at
+ * least one bpf_offset_reloc associated with it. This list is shared
+ * between multiple relocations for the same type ID and is updated as some
+ * of the candidates are pruned due to structural incompatibility.
+ */
+static int bpf_core_reloc_offset(struct bpf_program *prog,
+ const struct bpf_offset_reloc *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ const struct btf *targ_btf,
+ struct hashmap *cand_cache)
+{
+ const char *prog_name = bpf_program__title(prog, false);
+ struct bpf_core_spec local_spec, cand_spec, targ_spec;
+ const void *type_key = u32_as_hash_key(relo->type_id);
+ const struct btf_type *local_type, *cand_type;
+ const char *local_name, *cand_name;
+ struct ids_vec *cand_ids;
+ __u32 local_id, cand_id;
+ const char *spec_str;
+ int i, j, err;
+
+ local_id = relo->type_id;
+ local_type = btf__type_by_id(local_btf, local_id);
+ if (!local_type)
+ return -EINVAL;
+
+ local_name = btf__name_by_offset(local_btf, local_type->name_off);
+ if (str_is_empty(local_name))
+ return -EINVAL;
+
+ spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
+ if (str_is_empty(spec_str))
+ return -EINVAL;
+
+ err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
+ prog_name, relo_idx, local_id, local_name, spec_str,
+ err);
+ return -EINVAL;
+ }
+
+ pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
+ bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
+ libbpf_print(LIBBPF_DEBUG, "\n");
+
+ if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
+ cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
+ if (IS_ERR(cand_ids)) {
+			pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld\n",
+ prog_name, relo_idx, local_id, local_name,
+ PTR_ERR(cand_ids));
+ return PTR_ERR(cand_ids);
+ }
+ err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
+ if (err) {
+ bpf_core_free_cands(cand_ids);
+ return err;
+ }
+ }
+
+ for (i = 0, j = 0; i < cand_ids->len; i++) {
+ cand_id = cand_ids->data[i];
+ cand_type = btf__type_by_id(targ_btf, cand_id);
+ cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
+
+ err = bpf_core_spec_match(&local_spec, targ_btf,
+ cand_id, &cand_spec);
+ pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
+ prog_name, relo_idx, i, cand_name);
+ bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
+ libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
+ if (err < 0) {
+ pr_warning("prog '%s': relo #%d: matching error: %d\n",
+ prog_name, relo_idx, err);
+ return err;
+ }
+ if (err == 0)
+ continue;
+
+ if (j == 0) {
+ targ_spec = cand_spec;
+ } else if (cand_spec.offset != targ_spec.offset) {
+ /* if there are many candidates, they should all
+ * resolve to the same offset
+ */
+ pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
+ prog_name, relo_idx, cand_spec.offset,
+ targ_spec.offset);
+ return -EINVAL;
+ }
+
+ cand_ids->data[j++] = cand_spec.spec[0].type_id;
+ }
+
+ cand_ids->len = j;
+ if (cand_ids->len == 0) {
+ pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
+ prog_name, relo_idx, local_id, local_name, spec_str);
+ return -ESRCH;
+ }
+
+ err = bpf_core_reloc_insn(prog, relo->insn_off,
+ local_spec.offset, targ_spec.offset);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
+ prog_name, relo_idx, relo->insn_off, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
+{
+ const struct btf_ext_info_sec *sec;
+ const struct bpf_offset_reloc *rec;
+ const struct btf_ext_info *seg;
+ struct hashmap_entry *entry;
+ struct hashmap *cand_cache = NULL;
+ struct bpf_program *prog;
+ struct btf *targ_btf;
+ const char *sec_name;
+ int i, err = 0;
+
+ if (targ_btf_path)
+ targ_btf = btf__parse_elf(targ_btf_path, NULL);
+ else
+ targ_btf = bpf_core_find_kernel_btf();
+ if (IS_ERR(targ_btf)) {
+ pr_warning("failed to get target BTF: %ld\n",
+ PTR_ERR(targ_btf));
+ return PTR_ERR(targ_btf);
+ }
+
+ cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
+ if (IS_ERR(cand_cache)) {
+ err = PTR_ERR(cand_cache);
+ goto out;
+ }
+
+ seg = &obj->btf_ext->offset_reloc_info;
+ for_each_btf_ext_sec(seg, sec) {
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name)) {
+ err = -EINVAL;
+ goto out;
+ }
+ prog = bpf_object__find_program_by_title(obj, sec_name);
+ if (!prog) {
+ pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
+ sec_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
+ sec_name, sec->num_info);
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
+ targ_btf, cand_cache);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
+ sec_name, i, err);
+ goto out;
+ }
+ }
+ }
+
+out:
+ btf__free(targ_btf);
+ if (!IS_ERR_OR_NULL(cand_cache)) {
+ hashmap__for_each_entry(cand_cache, entry, i) {
+ bpf_core_free_cands(entry->value);
+ }
+ hashmap__free(cand_cache);
+ }
+ return err;
+}
+
+static int
+bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
+{
+ int err = 0;
+
+ if (obj->btf_ext->offset_reloc_info.len)
+ err = bpf_core_reloc_offsets(obj, targ_btf_path);
+
+ return err;
+}
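The hook above is what ties CO-RE offset relocation into object loading. A minimal caller-side sketch, assuming a hypothetical "prog.o" object built with BTF; the explicit target_btf_path is optional and purely illustrative, since by default kernel BTF is discovered automatically:

    struct bpf_object *obj;
    struct bpf_object_load_attr load_attr = {};

    obj = bpf_object__open("prog.o");         /* hypothetical object file */
    if (libbpf_get_error(obj))
        return -1;
    load_attr.obj = obj;
    /* optional override; otherwise bpf_core_find_kernel_btf() is used */
    load_attr.target_btf_path = "/path/to/vmlinux"; /* hypothetical ELF w/ .BTF */
    if (bpf_object__load_xattr(&load_attr))   /* CO-RE relocs run in here */
        return -1;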
+
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
struct reloc_desc *relo)
@@ -1993,14 +3291,21 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
return 0;
}
-
static int
-bpf_object__relocate(struct bpf_object *obj)
+bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
size_t i;
int err;
+ if (obj->btf_ext) {
+ err = bpf_object__relocate_core(obj, targ_btf_path);
+ if (err) {
+ pr_warning("failed to perform CO-RE relocations: %d\n",
+ err);
+ return err;
+ }
+ }
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
@@ -2040,9 +3345,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
return -LIBBPF_ERRNO__RELOC;
}
- err = bpf_program__collect_reloc(prog,
- shdr, data,
- obj);
+ err = bpf_program__collect_reloc(prog, shdr, data, obj);
if (err)
return err;
}
@@ -2057,7 +3360,10 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *cp, errmsg[STRERR_BUFSIZE];
int log_buf_size = BPF_LOG_BUF_SIZE;
char *log_buf;
- int ret;
+ int btf_fd, ret;
+
+ if (!insns || !insns_cnt)
+ return -EINVAL;
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
load_attr.prog_type = prog->type;
@@ -2069,7 +3375,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.license = license;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
- load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
+ /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
+ if (prog->obj->btf_ext)
+ btf_fd = bpf_object__btf_fd(prog->obj);
+ else
+ btf_fd = -1;
+ load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
@@ -2077,8 +3388,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
load_attr.log_level = prog->log_level;
- if (!load_attr.insns || !load_attr.insns_cnt)
- return -EINVAL;
+ load_attr.prog_flags = prog->prog_flags;
retry_load:
log_buf = malloc(log_buf_size);
@@ -2216,14 +3526,14 @@ out:
return err;
}
-static bool bpf_program__is_function_storage(struct bpf_program *prog,
- struct bpf_object *obj)
+static bool bpf_program__is_function_storage(const struct bpf_program *prog,
+ const struct bpf_object *obj)
{
return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}
static int
-bpf_object__load_progs(struct bpf_object *obj)
+bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
size_t i;
int err;
@@ -2231,6 +3541,7 @@ bpf_object__load_progs(struct bpf_object *obj)
for (i = 0; i < obj->nr_programs; i++) {
if (bpf_program__is_function_storage(&obj->programs[i], obj))
continue;
+ obj->programs[i].log_level |= log_level;
err = bpf_program__load(&obj->programs[i],
obj->license,
obj->kern_version);
@@ -2267,6 +3578,7 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
case BPF_PROG_TYPE_PERF_EVENT:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
return false;
case BPF_PROG_TYPE_KPROBE:
default:
@@ -2357,11 +3669,9 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf,
snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
(unsigned long)obj_buf,
(unsigned long)obj_buf_sz);
- tmp_name[sizeof(tmp_name) - 1] = '\0';
name = tmp_name;
}
- pr_debug("loading object '%s' from buffer\n",
- name);
+ pr_debug("loading object '%s' from buffer\n", name);
return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
@@ -2382,10 +3692,14 @@ int bpf_object__unload(struct bpf_object *obj)
return 0;
}
-int bpf_object__load(struct bpf_object *obj)
+int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
+ struct bpf_object *obj;
int err;
+ if (!attr)
+ return -EINVAL;
+ obj = attr->obj;
if (!obj)
return -EINVAL;
@@ -2397,8 +3711,8 @@ int bpf_object__load(struct bpf_object *obj)
obj->loaded = true;
CHECK_ERR(bpf_object__create_maps(obj), err, out);
- CHECK_ERR(bpf_object__relocate(obj), err, out);
- CHECK_ERR(bpf_object__load_progs(obj), err, out);
+ CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
+ CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
return 0;
out:
@@ -2407,6 +3721,15 @@ out:
return err;
}
+int bpf_object__load(struct bpf_object *obj)
+{
+ struct bpf_object_load_attr attr = {
+ .obj = obj,
+ };
+
+ return bpf_object__load_xattr(&attr);
+}
+
static int check_path(const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
@@ -2911,17 +4234,17 @@ bpf_object__next(struct bpf_object *prev)
return next;
}
-const char *bpf_object__name(struct bpf_object *obj)
+const char *bpf_object__name(const struct bpf_object *obj)
{
return obj ? obj->path : ERR_PTR(-EINVAL);
}
-unsigned int bpf_object__kversion(struct bpf_object *obj)
+unsigned int bpf_object__kversion(const struct bpf_object *obj)
{
return obj ? obj->kern_version : 0;
}
-struct btf *bpf_object__btf(struct bpf_object *obj)
+struct btf *bpf_object__btf(const struct bpf_object *obj)
{
return obj ? obj->btf : NULL;
}
@@ -2942,13 +4265,14 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv,
return 0;
}
-void *bpf_object__priv(struct bpf_object *obj)
+void *bpf_object__priv(const struct bpf_object *obj)
{
return obj ? obj->priv : ERR_PTR(-EINVAL);
}
static struct bpf_program *
-__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
+__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
+ bool forward)
{
size_t nr_programs = obj->nr_programs;
ssize_t idx;
@@ -2973,7 +4297,7 @@ __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
}
struct bpf_program *
-bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
struct bpf_program *prog = prev;
@@ -2985,7 +4309,7 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
}
struct bpf_program *
-bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
+bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
struct bpf_program *prog = next;
@@ -3007,7 +4331,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv,
return 0;
}
-void *bpf_program__priv(struct bpf_program *prog)
+void *bpf_program__priv(const struct bpf_program *prog)
{
return prog ? prog->priv : ERR_PTR(-EINVAL);
}
@@ -3017,7 +4341,7 @@ void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
prog->prog_ifindex = ifindex;
}
-const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
+const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
const char *title;
@@ -3033,7 +4357,7 @@ const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
return title;
}
-int bpf_program__fd(struct bpf_program *prog)
+int bpf_program__fd(const struct bpf_program *prog)
{
return bpf_program__nth_fd(prog, 0);
}
@@ -3066,7 +4390,7 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
return 0;
}
-int bpf_program__nth_fd(struct bpf_program *prog, int n)
+int bpf_program__nth_fd(const struct bpf_program *prog, int n)
{
int fd;
@@ -3094,25 +4418,25 @@ void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
prog->type = type;
}
-static bool bpf_program__is_type(struct bpf_program *prog,
+static bool bpf_program__is_type(const struct bpf_program *prog,
enum bpf_prog_type type)
{
return prog ? (prog->type == type) : false;
}
-#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
-int bpf_program__set_##NAME(struct bpf_program *prog) \
-{ \
- if (!prog) \
- return -EINVAL; \
- bpf_program__set_type(prog, TYPE); \
- return 0; \
-} \
- \
-bool bpf_program__is_##NAME(struct bpf_program *prog) \
-{ \
- return bpf_program__is_type(prog, TYPE); \
-} \
+#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
+int bpf_program__set_##NAME(struct bpf_program *prog) \
+{ \
+ if (!prog) \
+ return -EINVAL; \
+ bpf_program__set_type(prog, TYPE); \
+ return 0; \
+} \
+ \
+bool bpf_program__is_##NAME(const struct bpf_program *prog) \
+{ \
+ return bpf_program__is_type(prog, TYPE); \
+} \
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
@@ -3207,8 +4531,16 @@ static const struct {
BPF_CGROUP_UDP4_SENDMSG),
BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_CGROUP_UDP6_SENDMSG),
+ BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_UDP4_RECVMSG),
+ BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_UDP6_RECVMSG),
BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_CGROUP_SYSCTL),
+ BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_CGROUP_GETSOCKOPT),
+ BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_CGROUP_SETSOCKOPT),
};
#undef BPF_PROG_SEC_IMPL
@@ -3307,17 +4639,17 @@ bpf_program__identify_section(struct bpf_program *prog,
expected_attach_type);
}
-int bpf_map__fd(struct bpf_map *map)
+int bpf_map__fd(const struct bpf_map *map)
{
return map ? map->fd : -EINVAL;
}
-const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
+const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
return map ? &map->def : ERR_PTR(-EINVAL);
}
-const char *bpf_map__name(struct bpf_map *map)
+const char *bpf_map__name(const struct bpf_map *map)
{
return map ? map->name : NULL;
}
@@ -3348,17 +4680,17 @@ int bpf_map__set_priv(struct bpf_map *map, void *priv,
return 0;
}
-void *bpf_map__priv(struct bpf_map *map)
+void *bpf_map__priv(const struct bpf_map *map)
{
return map ? map->priv : ERR_PTR(-EINVAL);
}
-bool bpf_map__is_offload_neutral(struct bpf_map *map)
+bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
-bool bpf_map__is_internal(struct bpf_map *map)
+bool bpf_map__is_internal(const struct bpf_map *map)
{
return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}
@@ -3383,7 +4715,7 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
}
static struct bpf_map *
-__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
+__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
ssize_t idx;
struct bpf_map *s, *e;
@@ -3407,7 +4739,7 @@ __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
}
struct bpf_map *
-bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
+bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
if (prev == NULL)
return obj->maps;
@@ -3416,7 +4748,7 @@ bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
}
struct bpf_map *
-bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
+bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
if (next == NULL) {
if (!obj->nr_maps)
@@ -3428,7 +4760,7 @@ bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
}
struct bpf_map *
-bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
+bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
{
struct bpf_map *pos;
@@ -3440,7 +4772,7 @@ bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
}
int
-bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
+bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
@@ -3448,20 +4780,12 @@ bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
- int i;
-
- for (i = 0; i < obj->nr_maps; i++) {
- if (obj->maps[i].offset == offset)
- return &obj->maps[i];
- }
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-ENOTSUP);
}
long libbpf_get_error(const void *ptr)
{
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
- return 0;
+ return PTR_ERR_OR_ZERO(ptr);
}
int bpf_prog_load(const char *file, enum bpf_prog_type type,
@@ -3480,10 +4804,7 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd)
{
- struct bpf_object_open_attr open_attr = {
- .file = attr->file,
- .prog_type = attr->prog_type,
- };
+ struct bpf_object_open_attr open_attr = {};
struct bpf_program *prog, *first_prog = NULL;
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
@@ -3496,6 +4817,9 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
if (!attr->file)
return -EINVAL;
+ open_attr.file = attr->file;
+ open_attr.prog_type = attr->prog_type;
+
obj = bpf_object__open_xattr(&open_attr);
if (IS_ERR_OR_NULL(obj))
return -ENOENT;
@@ -3522,6 +4846,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
expected_attach_type);
prog->log_level = attr->log_level;
+ prog->prog_flags = attr->prog_flags;
if (!first_prog)
first_prog = prog;
}
@@ -3548,6 +4873,372 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
return 0;
}
+struct bpf_link {
+ int (*destroy)(struct bpf_link *link);
+};
+
+int bpf_link__destroy(struct bpf_link *link)
+{
+ int err;
+
+ if (!link)
+ return 0;
+
+ err = link->destroy(link);
+ free(link);
+
+ return err;
+}
+
+struct bpf_link_fd {
+ struct bpf_link link; /* has to be at the top of struct */
+ int fd; /* hook FD */
+};
+
+static int bpf_link__destroy_perf_event(struct bpf_link *link)
+{
+ struct bpf_link_fd *l = (void *)link;
+ int err;
+
+ err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
+ if (err)
+ err = -errno;
+
+ close(l->fd);
+ return err;
+}
+
+struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
+ int pfd)
+{
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link_fd *link;
+ int prog_fd, err;
+
+ if (pfd < 0) {
+ pr_warning("program '%s': invalid perf event FD %d\n",
+ bpf_program__title(prog, false), pfd);
+ return ERR_PTR(-EINVAL);
+ }
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ bpf_program__title(prog, false));
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = malloc(sizeof(*link));
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+ link->link.destroy = &bpf_link__destroy_perf_event;
+ link->fd = pfd;
+
+ if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
+ err = -errno;
+ free(link);
+ pr_warning("program '%s': failed to attach to pfd %d: %s\n",
+ bpf_program__title(prog, false), pfd,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return ERR_PTR(err);
+ }
+ if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
+ err = -errno;
+ free(link);
+ pr_warning("program '%s': failed to enable pfd %d: %s\n",
+ bpf_program__title(prog, false), pfd,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return ERR_PTR(err);
+ }
+ return (struct bpf_link *)link;
+}
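A hedged usage sketch for the helper above: the caller opens the perf event itself and hands the FD over (attr values are illustrative; prog is assumed to come from an already-loaded object):

    struct perf_event_attr attr = {
        .type = PERF_TYPE_SOFTWARE,
        .size = sizeof(attr),
        .config = PERF_COUNT_SW_CPU_CLOCK,
        .sample_freq = 99,
        .freq = 1,
    };
    struct bpf_link *link;
    int pfd;

    pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
                  -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
    if (pfd < 0)
        return -errno;
    link = bpf_program__attach_perf_event(prog, pfd);
    if (libbpf_get_error(link)) {
        close(pfd);              /* on success, the link owns pfd */
        return -1;
    }
    /* ... */
    bpf_link__destroy(link);     /* disables the event and closes pfd */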
+
+/*
+ * This function is expected to parse an integer in the range of [0, 2^31-1]
+ * from the given file using scanf format string fmt. If the actual parsed
+ * value is negative, the result might be indistinguishable from an error.
+ */
+static int parse_uint_from_file(const char *file, const char *fmt)
+{
+ char buf[STRERR_BUFSIZE];
+ int err, ret;
+ FILE *f;
+
+ f = fopen(file, "r");
+ if (!f) {
+ err = -errno;
+ pr_debug("failed to open '%s': %s\n", file,
+ libbpf_strerror_r(err, buf, sizeof(buf)));
+ return err;
+ }
+ err = fscanf(f, fmt, &ret);
+ if (err != 1) {
+ err = err == EOF ? -EIO : -errno;
+ pr_debug("failed to parse '%s': %s\n", file,
+ libbpf_strerror_r(err, buf, sizeof(buf)));
+ fclose(f);
+ return err;
+ }
+ fclose(f);
+ return ret;
+}
+
+static int determine_kprobe_perf_type(void)
+{
+ const char *file = "/sys/bus/event_source/devices/kprobe/type";
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int determine_uprobe_perf_type(void)
+{
+ const char *file = "/sys/bus/event_source/devices/uprobe/type";
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int determine_kprobe_retprobe_bit(void)
+{
+ const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
+
+ return parse_uint_from_file(file, "config:%d\n");
+}
+
+static int determine_uprobe_retprobe_bit(void)
+{
+ const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
+
+ return parse_uint_from_file(file, "config:%d\n");
+}
+
+static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
+ uint64_t offset, int pid)
+{
+ struct perf_event_attr attr = {};
+ char errmsg[STRERR_BUFSIZE];
+ int type, pfd, err;
+
+ type = uprobe ? determine_uprobe_perf_type()
+ : determine_kprobe_perf_type();
+ if (type < 0) {
+ pr_warning("failed to determine %s perf type: %s\n",
+ uprobe ? "uprobe" : "kprobe",
+ libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
+ return type;
+ }
+ if (retprobe) {
+ int bit = uprobe ? determine_uprobe_retprobe_bit()
+ : determine_kprobe_retprobe_bit();
+
+ if (bit < 0) {
+ pr_warning("failed to determine %s retprobe bit: %s\n",
+ uprobe ? "uprobe" : "kprobe",
+ libbpf_strerror_r(bit, errmsg,
+ sizeof(errmsg)));
+ return bit;
+ }
+ attr.config |= 1 << bit;
+ }
+ attr.size = sizeof(attr);
+ attr.type = type;
+ attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
+ attr.config2 = offset; /* kprobe_addr or probe_offset */
+
+ /* pid filter is meaningful only for uprobes */
+ pfd = syscall(__NR_perf_event_open, &attr,
+ pid < 0 ? -1 : pid /* pid */,
+ pid == -1 ? 0 : -1 /* cpu */,
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (pfd < 0) {
+ err = -errno;
+ pr_warning("%s perf_event_open() failed: %s\n",
+ uprobe ? "uprobe" : "kprobe",
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return err;
+ }
+ return pfd;
+}
+
+struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
+ bool retprobe,
+ const char *func_name)
+{
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link *link;
+ int pfd, err;
+
+ pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
+ 0 /* offset */, -1 /* pid */);
+ if (pfd < 0) {
+ pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "kretprobe" : "kprobe", func_name,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(pfd);
+ }
+ link = bpf_program__attach_perf_event(prog, pfd);
+ if (IS_ERR(link)) {
+ close(pfd);
+ err = PTR_ERR(link);
+ pr_warning("program '%s': failed to attach to %s '%s': %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "kretprobe" : "kprobe", func_name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return link;
+ }
+ return link;
+}
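A usage sketch, assuming prog was written for kprobe attachment; the kernel symbol below is illustrative:

    struct bpf_link *link;

    link = bpf_program__attach_kprobe(prog, false /* !retprobe */,
                                      "do_sys_open" /* illustrative symbol */);
    if (libbpf_get_error(link))
        return -1;
    /* ... trace ... */
    bpf_link__destroy(link);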
+
+struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
+ bool retprobe, pid_t pid,
+ const char *binary_path,
+ size_t func_offset)
+{
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link *link;
+ int pfd, err;
+
+ pfd = perf_event_open_probe(true /* uprobe */, retprobe,
+ binary_path, func_offset, pid);
+ if (pfd < 0) {
+ pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "uretprobe" : "uprobe",
+ binary_path, func_offset,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(pfd);
+ }
+ link = bpf_program__attach_perf_event(prog, pfd);
+ if (IS_ERR(link)) {
+ close(pfd);
+ err = PTR_ERR(link);
+ pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "uretprobe" : "uprobe",
+ binary_path, func_offset,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return link;
+ }
+ return link;
+}
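The uprobe variant additionally takes a pid filter, a binary path, and the file offset of the traced instruction; the offset normally comes from symbol resolution, so the values here are purely hypothetical:

    link = bpf_program__attach_uprobe(prog, true /* uretprobe */,
                                      -1 /* any process */,
                                      "/usr/lib/libc.so.6" /* hypothetical */,
                                      0x8e740 /* hypothetical func offset */);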
+
+static int determine_tracepoint_id(const char *tp_category,
+ const char *tp_name)
+{
+ char file[PATH_MAX];
+ int ret;
+
+ ret = snprintf(file, sizeof(file),
+ "/sys/kernel/debug/tracing/events/%s/%s/id",
+ tp_category, tp_name);
+ if (ret < 0)
+ return -errno;
+ if (ret >= sizeof(file)) {
+ pr_debug("tracepoint %s/%s path is too long\n",
+ tp_category, tp_name);
+ return -E2BIG;
+ }
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_open_tracepoint(const char *tp_category,
+ const char *tp_name)
+{
+ struct perf_event_attr attr = {};
+ char errmsg[STRERR_BUFSIZE];
+ int tp_id, pfd, err;
+
+ tp_id = determine_tracepoint_id(tp_category, tp_name);
+ if (tp_id < 0) {
+ pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
+ tp_category, tp_name,
+ libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
+ return tp_id;
+ }
+
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.size = sizeof(attr);
+ attr.config = tp_id;
+
+ pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (pfd < 0) {
+ err = -errno;
+ pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n",
+ tp_category, tp_name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return err;
+ }
+ return pfd;
+}
+
+struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
+ const char *tp_category,
+ const char *tp_name)
+{
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link *link;
+ int pfd, err;
+
+ pfd = perf_event_open_tracepoint(tp_category, tp_name);
+ if (pfd < 0) {
+ pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
+ bpf_program__title(prog, false),
+ tp_category, tp_name,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(pfd);
+ }
+ link = bpf_program__attach_perf_event(prog, pfd);
+ if (IS_ERR(link)) {
+ close(pfd);
+ err = PTR_ERR(link);
+ pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
+ bpf_program__title(prog, false),
+ tp_category, tp_name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ return link;
+ }
+ return link;
+}
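Usage mirrors the kprobe case, with the tracepoint addressed by category and name as laid out under /sys/kernel/debug/tracing/events/:

    link = bpf_program__attach_tracepoint(prog, "syscalls",
                                          "sys_enter_openat");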
+
+static int bpf_link__destroy_fd(struct bpf_link *link)
+{
+ struct bpf_link_fd *l = (void *)link;
+
+ return close(l->fd);
+}
+
+struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
+ const char *tp_name)
+{
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link_fd *link;
+ int prog_fd, pfd;
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warning("program '%s': can't attach before loaded\n",
+ bpf_program__title(prog, false));
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = malloc(sizeof(*link));
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+ link->link.destroy = &bpf_link__destroy_fd;
+
+ pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
+ if (pfd < 0) {
+ pfd = -errno;
+ free(link);
+ pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
+ bpf_program__title(prog, false), tp_name,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(pfd);
+ }
+ link->fd = pfd;
+ return (struct bpf_link *)link;
+}
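Raw tracepoints bypass perf entirely (bpf_raw_tracepoint_open() rather than perf_event_open()), so the resulting link has only an FD to close:

    link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");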
+
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
void **copy_mem, size_t *copy_size,
@@ -3596,6 +5287,370 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
return ret;
}
+struct perf_buffer;
+
+struct perf_buffer_params {
+ struct perf_event_attr *attr;
+ /* if event_cb is specified, it takes precedence */
+ perf_buffer_event_fn event_cb;
+ /* sample_cb and lost_cb are higher-level common-case callbacks */
+ perf_buffer_sample_fn sample_cb;
+ perf_buffer_lost_fn lost_cb;
+ void *ctx;
+ int cpu_cnt;
+ int *cpus;
+ int *map_keys;
+};
+
+struct perf_cpu_buf {
+ struct perf_buffer *pb;
+ void *base; /* mmap()'ed memory */
+ void *buf; /* for reconstructing segmented data */
+ size_t buf_size;
+ int fd;
+ int cpu;
+ int map_key;
+};
+
+struct perf_buffer {
+ perf_buffer_event_fn event_cb;
+ perf_buffer_sample_fn sample_cb;
+ perf_buffer_lost_fn lost_cb;
+ void *ctx; /* passed into callbacks */
+
+ size_t page_size;
+ size_t mmap_size;
+ struct perf_cpu_buf **cpu_bufs;
+ struct epoll_event *events;
+ int cpu_cnt;
+ int epoll_fd; /* epoll instance FD */
+ int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
+};
+
+static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
+ struct perf_cpu_buf *cpu_buf)
+{
+ if (!cpu_buf)
+ return;
+ if (cpu_buf->base &&
+ munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
+ pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
+ if (cpu_buf->fd >= 0) {
+ ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
+ close(cpu_buf->fd);
+ }
+ free(cpu_buf->buf);
+ free(cpu_buf);
+}
+
+void perf_buffer__free(struct perf_buffer *pb)
+{
+ int i;
+
+ if (!pb)
+ return;
+ if (pb->cpu_bufs) {
+ for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
+ struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
+
+ bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
+ perf_buffer__free_cpu_buf(pb, cpu_buf);
+ }
+ free(pb->cpu_bufs);
+ }
+ if (pb->epoll_fd >= 0)
+ close(pb->epoll_fd);
+ free(pb->events);
+ free(pb);
+}
+
+static struct perf_cpu_buf *
+perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
+ int cpu, int map_key)
+{
+ struct perf_cpu_buf *cpu_buf;
+ char msg[STRERR_BUFSIZE];
+ int err;
+
+ cpu_buf = calloc(1, sizeof(*cpu_buf));
+ if (!cpu_buf)
+ return ERR_PTR(-ENOMEM);
+
+ cpu_buf->pb = pb;
+ cpu_buf->cpu = cpu;
+ cpu_buf->map_key = map_key;
+
+ cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
+ -1, PERF_FLAG_FD_CLOEXEC);
+ if (cpu_buf->fd < 0) {
+ err = -errno;
+ pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
+ cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ goto error;
+ }
+
+ cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ cpu_buf->fd, 0);
+ if (cpu_buf->base == MAP_FAILED) {
+ cpu_buf->base = NULL;
+ err = -errno;
+ pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
+ cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ goto error;
+ }
+
+ if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
+ err = -errno;
+ pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
+ cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ goto error;
+ }
+
+ return cpu_buf;
+
+error:
+ perf_buffer__free_cpu_buf(pb, cpu_buf);
+ return (struct perf_cpu_buf *)ERR_PTR(err);
+}
+
+static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
+ struct perf_buffer_params *p);
+
+struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
+ const struct perf_buffer_opts *opts)
+{
+ struct perf_buffer_params p = {};
+ struct perf_event_attr attr = { 0, };
+
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
+ p.attr = &attr;
+ p.sample_cb = opts ? opts->sample_cb : NULL;
+ p.lost_cb = opts ? opts->lost_cb : NULL;
+ p.ctx = opts ? opts->ctx : NULL;
+
+ return __perf_buffer__new(map_fd, page_cnt, &p);
+}
+
+struct perf_buffer *
+perf_buffer__new_raw(int map_fd, size_t page_cnt,
+ const struct perf_buffer_raw_opts *opts)
+{
+ struct perf_buffer_params p = {};
+
+ p.attr = opts->attr;
+ p.event_cb = opts->event_cb;
+ p.ctx = opts->ctx;
+ p.cpu_cnt = opts->cpu_cnt;
+ p.cpus = opts->cpus;
+ p.map_keys = opts->map_keys;
+
+ return __perf_buffer__new(map_fd, page_cnt, &p);
+}
+
+static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
+ struct perf_buffer_params *p)
+{
+ struct bpf_map_info map = {};
+ char msg[STRERR_BUFSIZE];
+ struct perf_buffer *pb;
+ __u32 map_info_len;
+ int err, i;
+
+ if (page_cnt & (page_cnt - 1)) {
+ pr_warning("page count should be power of two, but is %zu\n",
+ page_cnt);
+ return ERR_PTR(-EINVAL);
+ }
+
+ map_info_len = sizeof(map);
+ err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
+ if (err) {
+ err = -errno;
+ pr_warning("failed to get map info for map FD %d: %s\n",
+ map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+ return ERR_PTR(err);
+ }
+
+ if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+ pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+ map.name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pb = calloc(1, sizeof(*pb));
+ if (!pb)
+ return ERR_PTR(-ENOMEM);
+
+ pb->event_cb = p->event_cb;
+ pb->sample_cb = p->sample_cb;
+ pb->lost_cb = p->lost_cb;
+ pb->ctx = p->ctx;
+
+ pb->page_size = getpagesize();
+ pb->mmap_size = pb->page_size * page_cnt;
+ pb->map_fd = map_fd;
+
+ pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+ if (pb->epoll_fd < 0) {
+ err = -errno;
+ pr_warning("failed to create epoll instance: %s\n",
+ libbpf_strerror_r(err, msg, sizeof(msg)));
+ goto error;
+ }
+
+ if (p->cpu_cnt > 0) {
+ pb->cpu_cnt = p->cpu_cnt;
+ } else {
+ pb->cpu_cnt = libbpf_num_possible_cpus();
+ if (pb->cpu_cnt < 0) {
+ err = pb->cpu_cnt;
+ goto error;
+ }
+ if (map.max_entries < pb->cpu_cnt)
+ pb->cpu_cnt = map.max_entries;
+ }
+
+ pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
+ if (!pb->events) {
+ err = -ENOMEM;
+ pr_warning("failed to allocate events: out of memory\n");
+ goto error;
+ }
+ pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
+ if (!pb->cpu_bufs) {
+ err = -ENOMEM;
+ pr_warning("failed to allocate buffers: out of memory\n");
+ goto error;
+ }
+
+ for (i = 0; i < pb->cpu_cnt; i++) {
+ struct perf_cpu_buf *cpu_buf;
+ int cpu, map_key;
+
+ cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
+ map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
+
+ cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
+ if (IS_ERR(cpu_buf)) {
+ err = PTR_ERR(cpu_buf);
+ goto error;
+ }
+
+ pb->cpu_bufs[i] = cpu_buf;
+
+ err = bpf_map_update_elem(pb->map_fd, &map_key,
+ &cpu_buf->fd, 0);
+ if (err) {
+ err = -errno;
+ pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
+ cpu, map_key, cpu_buf->fd,
+ libbpf_strerror_r(err, msg, sizeof(msg)));
+ goto error;
+ }
+
+ pb->events[i].events = EPOLLIN;
+ pb->events[i].data.ptr = cpu_buf;
+ if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
+ &pb->events[i]) < 0) {
+ err = -errno;
+ pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
+ cpu, cpu_buf->fd,
+ libbpf_strerror_r(err, msg, sizeof(msg)));
+ goto error;
+ }
+ }
+
+ return pb;
+
+error:
+ if (pb)
+ perf_buffer__free(pb);
+ return ERR_PTR(err);
+}
+
+struct perf_sample_raw {
+ struct perf_event_header header;
+ uint32_t size;
+ char data[0];
+};
+
+struct perf_sample_lost {
+ struct perf_event_header header;
+ uint64_t id;
+ uint64_t lost;
+ uint64_t sample_id;
+};
+
+static enum bpf_perf_event_ret
+perf_buffer__process_record(struct perf_event_header *e, void *ctx)
+{
+ struct perf_cpu_buf *cpu_buf = ctx;
+ struct perf_buffer *pb = cpu_buf->pb;
+ void *data = e;
+
+ /* user wants full control over parsing perf event */
+ if (pb->event_cb)
+ return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
+
+ switch (e->type) {
+ case PERF_RECORD_SAMPLE: {
+ struct perf_sample_raw *s = data;
+
+ if (pb->sample_cb)
+ pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
+ break;
+ }
+ case PERF_RECORD_LOST: {
+ struct perf_sample_lost *s = data;
+
+ if (pb->lost_cb)
+ pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
+ break;
+ }
+ default:
+ pr_warning("unknown perf sample type %d\n", e->type);
+ return LIBBPF_PERF_EVENT_ERROR;
+ }
+ return LIBBPF_PERF_EVENT_CONT;
+}
+
+static int perf_buffer__process_records(struct perf_buffer *pb,
+ struct perf_cpu_buf *cpu_buf)
+{
+ enum bpf_perf_event_ret ret;
+
+ ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
+ pb->page_size, &cpu_buf->buf,
+ &cpu_buf->buf_size,
+ perf_buffer__process_record, cpu_buf);
+ if (ret != LIBBPF_PERF_EVENT_CONT)
+ return ret;
+ return 0;
+}
+
+int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
+{
+ int i, cnt, err;
+
+ cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
+ for (i = 0; i < cnt; i++) {
+ struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
+
+ err = perf_buffer__process_records(pb, cpu_buf);
+ if (err) {
+ pr_warning("error while processing records: %d\n", err);
+ return err;
+ }
+ }
+ return cnt < 0 ? -errno : cnt;
+}
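Putting the perf buffer API together, a minimal consumer sketch; map_fd is assumed to be the FD of a loaded BPF_MAP_TYPE_PERF_EVENT_ARRAY map, and the callback names are hypothetical:

    static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
    {
        /* one raw sample, as emitted by bpf_perf_event_output() */
    }

    static void handle_lost(void *ctx, int cpu, __u64 cnt)
    {
        fprintf(stderr, "lost %llu samples on CPU #%d\n", cnt, cpu);
    }

    struct perf_buffer_opts pb_opts = {
        .sample_cb = handle_sample,
        .lost_cb = handle_lost,
    };
    struct perf_buffer *pb;

    pb = perf_buffer__new(map_fd, 8 /* pages per CPU ring */, &pb_opts);
    if (libbpf_get_error(pb))
        return -1;
    while (perf_buffer__poll(pb, 100 /* timeout, ms */) >= 0)
        ;                        /* callbacks fire from inside poll */
    perf_buffer__free(pb);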
+
struct bpf_prog_info_array_desc {
int array_offset; /* e.g. offset of jited_prog_insns */
int count_offset; /* e.g. offset of jited_prog_len */
@@ -3841,3 +5896,64 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
desc->array_offset, addr);
}
}
+
+int libbpf_num_possible_cpus(void)
+{
+ static const char *fcpu = "/sys/devices/system/cpu/possible";
+ int len = 0, n = 0, il = 0, ir = 0;
+ unsigned int start = 0, end = 0;
+ int tmp_cpus = 0;
+ static int cpus;
+ char buf[128];
+ int error = 0;
+ int fd = -1;
+
+ tmp_cpus = READ_ONCE(cpus);
+ if (tmp_cpus > 0)
+ return tmp_cpus;
+
+ fd = open(fcpu, O_RDONLY);
+ if (fd < 0) {
+ error = errno;
+ pr_warning("Failed to open file %s: %s\n",
+ fcpu, strerror(error));
+ return -error;
+ }
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len <= 0) {
+ error = len ? errno : EINVAL;
+ pr_warning("Failed to read # of possible cpus from %s: %s\n",
+ fcpu, strerror(error));
+ return -error;
+ }
+ if (len == sizeof(buf)) {
+ pr_warning("File %s size overflow\n", fcpu);
+ return -EOVERFLOW;
+ }
+ buf[len] = '\0';
+
+ for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
+ /* Each substring separated by ',' has format \d+-\d+ or \d+ */
+ if (buf[ir] == ',' || buf[ir] == '\0') {
+ buf[ir] = '\0';
+ n = sscanf(&buf[il], "%u-%u", &start, &end);
+ if (n <= 0) {
+ pr_warning("Failed to get # CPUs from %s\n",
+ &buf[il]);
+ return -EINVAL;
+ } else if (n == 1) {
+ end = start;
+ }
+ tmp_cpus += end - start + 1;
+ il = ir + 1;
+ }
+ }
+ if (tmp_cpus <= 0) {
+ pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
+ return -EINVAL;
+ }
+
+ WRITE_ONCE(cpus, tmp_cpus);
+ return tmp_cpus;
+}
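For a concrete feel of the parsing loop above: /sys/devices/system/cpu/possible holds a list such as "0-7" or "0-3,8-11"; for the latter, the loop splits on ',' and sums (3 - 0 + 1) + (11 - 8 + 1) = 8 possible CPUs, which is then cached in the static cpus variable for subsequent calls.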
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c5ff00515ce7..e8f70977d137 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -57,7 +57,7 @@ enum libbpf_print_level {
typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
const char *, va_list ap);
-LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn);
+LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;
@@ -89,18 +89,26 @@ LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API void bpf_object__close(struct bpf_object *object);
+struct bpf_object_load_attr {
+ struct bpf_object *obj;
+ int log_level;
+ const char *target_btf_path;
+};
+
/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
+LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
-LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
-LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
+LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
struct btf;
-LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj);
+LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
-bpf_object__find_program_by_title(struct bpf_object *obj, const char *title);
+bpf_object__find_program_by_title(const struct bpf_object *obj,
+ const char *title);
LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
@@ -112,7 +120,7 @@ LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
bpf_object_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_object__priv(struct bpf_object *prog);
+LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);
LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
@@ -123,7 +131,7 @@ LIBBPF_API int libbpf_attach_type_by_name(const char *name,
/* Accessors of bpf_program */
struct bpf_program;
LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
- struct bpf_object *obj);
+ const struct bpf_object *obj);
#define bpf_object__for_each_program(pos, obj) \
for ((pos) = bpf_program__next(NULL, (obj)); \
@@ -131,24 +139,23 @@ LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
(pos) = bpf_program__next((pos), (obj)))
LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
- struct bpf_object *obj);
+ const struct bpf_object *obj);
-typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
- void *);
+typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);
LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
bpf_program_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_program__priv(struct bpf_program *prog);
+LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);
-LIBBPF_API const char *bpf_program__title(struct bpf_program *prog,
+LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
bool needs_copy);
LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
__u32 kern_version);
-LIBBPF_API int bpf_program__fd(struct bpf_program *prog);
+LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
const char *path,
int instance);
@@ -159,6 +166,27 @@ LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
+struct bpf_link;
+
+LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_perf_event(struct bpf_program *prog, int pfd);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
+ const char *func_name);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
+ pid_t pid, const char *binary_path,
+ size_t func_offset);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_tracepoint(struct bpf_program *prog,
+ const char *tp_category,
+ const char *tp_name);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
+ const char *tp_name);
+
struct bpf_insn;
/*
@@ -221,7 +249,7 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
bpf_program_prep_t prep);
-LIBBPF_API int bpf_program__nth_fd(struct bpf_program *prog, int n);
+LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);
/*
* Adjust type of BPF program. Default is kprobe.
@@ -240,14 +268,14 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
-LIBBPF_API bool bpf_program__is_socket_filter(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_tracepoint(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_raw_tracepoint(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_kprobe(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_sched_cls(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_sched_act(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_xdp(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_perf_event(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
/*
* No need for __attribute__((packed)), all members of 'bpf_map_def'
@@ -269,10 +297,10 @@ struct bpf_map_def {
*/
struct bpf_map;
LIBBPF_API struct bpf_map *
-bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
+bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
LIBBPF_API int
-bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name);
+bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
/*
* Get bpf_map through the offset of corresponding struct bpf_map_def
@@ -282,7 +310,7 @@ LIBBPF_API struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
LIBBPF_API struct bpf_map *
-bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
+bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
#define bpf_object__for_each_map(pos, obj) \
for ((pos) = bpf_map__next(NULL, (obj)); \
(pos) != NULL; \
@@ -290,22 +318,22 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
#define bpf_map__for_each bpf_object__for_each_map
LIBBPF_API struct bpf_map *
-bpf_map__prev(struct bpf_map *map, struct bpf_object *obj);
+bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
-LIBBPF_API int bpf_map__fd(struct bpf_map *map);
-LIBBPF_API const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
-LIBBPF_API const char *bpf_map__name(struct bpf_map *map);
+LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
+LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
+LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
+LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
-LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
-LIBBPF_API bool bpf_map__is_internal(struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
@@ -320,6 +348,7 @@ struct bpf_prog_load_attr {
enum bpf_attach_type expected_attach_type;
int ifindex;
int log_level;
+ int prog_flags;
};
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
@@ -330,6 +359,26 @@ LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
+struct perf_buffer;
+
+typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
+ void *data, __u32 size);
+typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
+
+/* common use perf buffer options */
+struct perf_buffer_opts {
+ /* if specified, sample_cb is called for each sample */
+ perf_buffer_sample_fn sample_cb;
+ /* if specified, lost_cb is called for each batch of lost samples */
+ perf_buffer_lost_fn lost_cb;
+ /* ctx is provided to sample_cb and lost_cb */
+ void *ctx;
+};
+
+LIBBPF_API struct perf_buffer *
+perf_buffer__new(int map_fd, size_t page_cnt,
+ const struct perf_buffer_opts *opts);
+
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
LIBBPF_PERF_EVENT_ERROR = -1,
@@ -337,6 +386,35 @@ enum bpf_perf_event_ret {
};
struct perf_event_header;
+
+typedef enum bpf_perf_event_ret
+(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event);
+
+/* raw perf buffer options, giving most power and control */
+struct perf_buffer_raw_opts {
+ /* perf event attrs passed directly into perf_event_open() */
+ struct perf_event_attr *attr;
+ /* raw event callback */
+ perf_buffer_event_fn event_cb;
+ /* ctx is provided to event_cb */
+ void *ctx;
+ /* if cpu_cnt == 0, open ring buffers on all possible CPUs (up to
+ * max_entries of the given PERF_EVENT_ARRAY map)
+ */
+ int cpu_cnt;
+ /* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */
+ int *cpus;
+ /* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
+ int *map_keys;
+};
+
+LIBBPF_API struct perf_buffer *
+perf_buffer__new_raw(int map_fd, size_t page_cnt,
+ const struct perf_buffer_raw_opts *opts);
+
+LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
+
typedef enum bpf_perf_event_ret
(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
void *private_data);
@@ -447,6 +525,22 @@ bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
+/*
+ * A helper function to get the number of possible CPUs before looking up
+ * per-CPU maps. Negative errno is returned on failure.
+ *
+ * Example usage:
+ *
+ * int ncpus = libbpf_num_possible_cpus();
+ * if (ncpus < 0) {
+ * // error handling
+ * }
+ * long values[ncpus];
+ * bpf_map_lookup_elem(per_cpu_map_fd, key, values);
+ *
+ */
+LIBBPF_API int libbpf_num_possible_cpus(void);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 673001787cba..d04c7cb623ed 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -164,3 +164,29 @@ LIBBPF_0.0.3 {
bpf_map_freeze;
btf__finalize_data;
} LIBBPF_0.0.2;
+
+LIBBPF_0.0.4 {
+ global:
+ bpf_link__destroy;
+ bpf_object__load_xattr;
+ bpf_program__attach_kprobe;
+ bpf_program__attach_perf_event;
+ bpf_program__attach_raw_tracepoint;
+ bpf_program__attach_tracepoint;
+ bpf_program__attach_uprobe;
+ btf_dump__dump_type;
+ btf_dump__free;
+ btf_dump__new;
+ btf__parse_elf;
+ libbpf_num_possible_cpus;
+ perf_buffer__free;
+ perf_buffer__new;
+ perf_buffer__new_raw;
+ perf_buffer__poll;
+ xsk_umem__create;
+} LIBBPF_0.0.3;
+
+LIBBPF_0.0.5 {
+ global:
+ bpf_btf_get_next_id;
+} LIBBPF_0.0.4;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 789e435b5900..2e83a34f8c79 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -9,6 +9,8 @@
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H
+#include "libbpf.h"
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
@@ -21,7 +23,132 @@
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
-int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len);
+#ifndef min
+# define min(x, y) ((x) < (y) ? (x) : (y))
+#endif
+#ifndef max
+# define max(x, y) ((x) < (y) ? (y) : (x))
+#endif
+#ifndef offsetofend
+# define offsetofend(TYPE, FIELD) \
+ (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
+#endif
+
+extern void libbpf_print(enum libbpf_print_level level,
+ const char *format, ...)
+ __attribute__((format(printf, 2, 3)));
+
+#define __pr(level, fmt, ...) \
+do { \
+ libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+ const char *str_sec, size_t str_len);
+
+struct btf_ext_info {
+ /*
+ * info points to the individual info section (e.g. func_info and
+ * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
+ */
+ void *info;
+ __u32 rec_size;
+ __u32 len;
+};
+
+#define for_each_btf_ext_sec(seg, sec) \
+ for (sec = (seg)->info; \
+ (void *)sec < (seg)->info + (seg)->len; \
+ sec = (void *)sec + sizeof(struct btf_ext_info_sec) + \
+ (seg)->rec_size * sec->num_info)
+
+#define for_each_btf_ext_rec(seg, sec, i, rec) \
+ for (i = 0, rec = (void *)&(sec)->data; \
+ i < (sec)->num_info; \
+ i++, rec = (void *)rec + (seg)->rec_size)
+
+struct btf_ext {
+ union {
+ struct btf_ext_header *hdr;
+ void *data;
+ };
+ struct btf_ext_info func_info;
+ struct btf_ext_info line_info;
+ struct btf_ext_info offset_reloc_info;
+ __u32 data_size;
+};
+
+struct btf_ext_info_sec {
+ __u32 sec_name_off;
+ __u32 num_info;
+ /* Followed by num_info * record_size number of bytes */
+ __u8 data[0];
+};
+
+/* The minimum bpf_func_info checked by the loader */
+struct bpf_func_info_min {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+/* The minimum bpf_line_info checked by the loader */
+struct bpf_line_info_min {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
+/* The minimum bpf_offset_reloc checked by the loader
+ *
+ * Offset relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+ * its insn->imm field to be relocated with actual offset;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+ * offset;
+ * - access_str_off - offset into corresponding .BTF string section. String
+ * itself encodes an accessed field using a sequence of field and array
+ * indices, separated by colons (:). It's conceptually very close to LLVM's
+ * getelementptr ([0]) instruction's arguments for identifying offset to
+ * a field.
+ *
+ * An example, to give a better feel:
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ *
+ * type_id for all relocs in this example will capture BTF type id of
+ * `struct sample`.
+ *
+ * Such relocation is emitted when using __builtin_preserve_access_index()
+ * Clang built-in, passing expression that captures field address, e.g.:
+ *
+ * bpf_probe_read(&dst, sizeof(dst),
+ * __builtin_preserve_access_index(&src->a.b.c));
+ *
+ * In this case Clang will emit offset relocation recording necessary data to
+ * be able to find offset of embedded `a.b.c` field within `src` struct.
+ *
+ * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+struct bpf_offset_reloc {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
+};
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 5e2aa83f637a..4b0b0364f5fc 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -101,6 +101,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
default:
break;
}
@@ -133,8 +134,8 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
return errno != EINVAL && errno != EOPNOTSUPP;
}
-int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len)
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+ const char *str_sec, size_t str_len)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
@@ -157,14 +158,9 @@ int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
- if (btf_fd < 0) {
- free(raw_btf);
- return 0;
- }
- close(btf_fd);
free(raw_btf);
- return 1;
+ return btf_fd;
}
static int load_sk_storage_btf(void)
@@ -190,7 +186,7 @@ static int load_sk_storage_btf(void)
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};
- return libbpf__probe_raw_btf((char *)types, sizeof(types),
+ return libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
}
@@ -248,6 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP:
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
index da94c4cb2e4d..59c779c5790c 100644
--- a/tools/lib/bpf/libbpf_util.h
+++ b/tools/lib/bpf/libbpf_util.h
@@ -10,19 +10,6 @@
extern "C" {
#endif
-extern void libbpf_print(enum libbpf_print_level level,
- const char *format, ...)
- __attribute__((format(printf, 2, 3)));
-
-#define __pr(level, fmt, ...) \
-do { \
- libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
-#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
-#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
-
/* Use these barrier functions instead of smp_[rw]mb() when they are
* used in a libbpf header file. That way they can be built into the
* application that uses libbpf.
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
index 00e48ac5b806..b8064eedc177 100644
--- a/tools/lib/bpf/str_error.c
+++ b/tools/lib/bpf/str_error.c
@@ -11,7 +11,7 @@
*/
char *libbpf_strerror_r(int err, char *dst, int len)
{
- int ret = strerror_r(err, dst, len);
+ int ret = strerror_r(err < 0 ? -err : err, dst, len);
if (ret)
snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
return dst;
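For illustration, the sign handling matters because libbpf APIs conventionally report failures as negative errno codes, while strerror_r() expects a positive one; a sketch:

    char buf[STRERR_BUFSIZE];
    int err = bpf_map__fd(NULL);              /* returns -EINVAL */

    /* prints a message for EINVAL rather than an unknown-error fallback */
    pr_warning("map FD: %s\n", libbpf_strerror_r(err, buf, sizeof(buf)));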
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index a3d1a302bc9c..842c4fd55859 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -29,7 +29,7 @@
#include "bpf.h"
#include "libbpf.h"
-#include "libbpf_util.h"
+#include "libbpf_internal.h"
#include "xsk.h"
#ifndef SOL_XDP
@@ -60,13 +60,12 @@ struct xsk_socket {
struct xsk_umem *umem;
struct xsk_socket_config config;
int fd;
- int xsks_map;
int ifindex;
int prog_fd;
- int qidconf_map_fd;
int xsks_map_fd;
__u32 queue_id;
char ifname[IFNAMSIZ];
+ bool zc;
};
struct xsk_nl_info {
@@ -75,23 +74,6 @@ struct xsk_nl_info {
int fd;
};
-/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
- * Unfortunately, it is not part of glibc.
- */
-static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
- int fd, __u64 offset)
-{
-#ifdef __NR_mmap2
- unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
- long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
- (off_t)(offset >> page_shift));
-
- return (void *)ret;
-#else
- return mmap(addr, length, prot, flags, fd, offset);
-#endif
-}
-
int xsk_umem__fd(const struct xsk_umem *umem)
{
return umem ? umem->fd : -EINVAL;
@@ -117,6 +99,7 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+ cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
return;
}
@@ -124,6 +107,7 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
cfg->comp_size = usr_cfg->comp_size;
cfg->frame_size = usr_cfg->frame_size;
cfg->frame_headroom = usr_cfg->frame_headroom;
+ cfg->flags = usr_cfg->flags;
}
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
@@ -150,9 +134,10 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
return 0;
}
-int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
- struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
- const struct xsk_umem_config *usr_config)
+int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
+ __u64 size, struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
{
struct xdp_mmap_offsets off;
struct xdp_umem_reg mr;
@@ -183,6 +168,7 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
mr.len = size;
mr.chunk_size = umem->config.frame_size;
mr.headroom = umem->config.frame_headroom;
+ mr.flags = umem->config.flags;
err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
if (err) {
@@ -211,10 +197,9 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
goto out_socket;
}
- map = xsk_mmap(NULL, off.fr.desc +
- umem->config.fill_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
- umem->fd, XDP_UMEM_PGOFF_FILL_RING);
+ map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
+ XDP_UMEM_PGOFF_FILL_RING);
if (map == MAP_FAILED) {
err = -errno;
goto out_socket;
@@ -225,13 +210,13 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
fill->size = umem->config.fill_size;
fill->producer = map + off.fr.producer;
fill->consumer = map + off.fr.consumer;
+ fill->flags = map + off.fr.flags;
fill->ring = map + off.fr.desc;
fill->cached_cons = umem->config.fill_size;
- map = xsk_mmap(NULL,
- off.cr.desc + umem->config.comp_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
- umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
+ map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
+ XDP_UMEM_PGOFF_COMPLETION_RING);
if (map == MAP_FAILED) {
err = -errno;
goto out_mmap;
@@ -242,6 +227,7 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
comp->size = umem->config.comp_size;
comp->producer = map + off.cr.producer;
comp->consumer = map + off.cr.consumer;
+ comp->flags = map + off.cr.flags;
comp->ring = map + off.cr.desc;
*umem_ptr = umem;
@@ -256,6 +242,29 @@ out_umem_alloc:
return err;
}
+struct xsk_umem_config_v1 {
+ __u32 fill_size;
+ __u32 comp_size;
+ __u32 frame_size;
+ __u32 frame_headroom;
+};
+
+int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
+ __u64 size, struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
+{
+ struct xsk_umem_config config;
+
+ memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
+ config.flags = 0;
+
+ return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
+ &config);
+}
+asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
+asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
+
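For reference, the two .symver directives above implement GNU symbol versioning: a single '@' binds an implementation to an old version node so already-linked binaries keep their ABI, while '@@' marks the default that new links resolve to. A minimal sketch of the same pattern with hypothetical names (LIB_1/LIB_2 would need matching nodes in the linker version script):

/* foo_v1/foo_v2 and the LIB_1/LIB_2 version nodes are hypothetical. */
int foo_v1(int a)        { return a; }
int foo_v2(int a, int b) { return a + b; }

asm(".symver foo_v1, foo@LIB_1");   /* old binaries keep resolving here */
asm(".symver foo_v2, foo@@LIB_2");  /* '@@' = default for new links */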
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
static const int log_buf_size = 16 * 1024;
@@ -265,15 +274,11 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
/* This is the C-program:
* SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
* {
- * int *qidconf, index = ctx->rx_queue_index;
+ * int index = ctx->rx_queue_index;
*
* // A set entry here means that the corresponding queue_id
* // has an active AF_XDP socket bound to it.
- * qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
- * if (!qidconf)
- * return XDP_ABORTED;
- *
- * if (*qidconf)
+ * if (bpf_map_lookup_elem(&xsks_map, &index))
* return bpf_redirect_map(&xsks_map, index, 0);
*
* return XDP_PASS;
@@ -286,15 +291,10 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- /* if r1 == 0 goto +8 */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
BPF_MOV32_IMM(BPF_REG_0, 2),
- /* r1 = *(u32 *)(r1 + 0) */
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
/* if r1 == 0 goto +5 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
/* r2 = *(u32 *)(r10 - 4) */
@@ -327,24 +327,24 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
- struct ethtool_channels channels;
- struct ifreq ifr;
+ struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
+ struct ifreq ifr = {};
int fd, err, ret;
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0)
return -errno;
- channels.cmd = ETHTOOL_GCHANNELS;
ifr.ifr_data = (void *)&channels;
- strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ);
+ memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err && errno != EOPNOTSUPP) {
ret = -errno;
goto out;
}
- if (channels.max_combined == 0 || errno == EOPNOTSUPP)
+ if (err || channels.max_combined == 0)
/* If the device says it has no channels, then all traffic
* is sent to a single stream, so max queues = 1.
*/
@@ -366,18 +366,11 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
if (max_queues < 0)
return max_queues;
- fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
+ fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
sizeof(int), sizeof(int), max_queues, 0);
if (fd < 0)
return fd;
- xsk->qidconf_map_fd = fd;
- fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
- sizeof(int), sizeof(int), max_queues, 0);
- if (fd < 0) {
- close(xsk->qidconf_map_fd);
- return fd;
- }
xsk->xsks_map_fd = fd;
return 0;
@@ -385,10 +378,8 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
- close(xsk->qidconf_map_fd);
+ bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
close(xsk->xsks_map_fd);
- xsk->qidconf_map_fd = -1;
- xsk->xsks_map_fd = -1;
}
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
@@ -417,10 +408,9 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
if (err)
goto out_map_ids;
- for (i = 0; i < prog_info.nr_map_ids; i++) {
- if (xsk->qidconf_map_fd != -1 && xsk->xsks_map_fd != -1)
- break;
+ xsk->xsks_map_fd = -1;
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
fd = bpf_map_get_fd_by_id(map_ids[i]);
if (fd < 0)
continue;
@@ -431,11 +421,6 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
continue;
}
- if (!strcmp(map_info.name, "qidconf_map")) {
- xsk->qidconf_map_fd = fd;
- continue;
- }
-
if (!strcmp(map_info.name, "xsks_map")) {
xsk->xsks_map_fd = fd;
continue;
@@ -445,40 +430,18 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
}
err = 0;
- if (xsk->qidconf_map_fd < 0 || xsk->xsks_map_fd < 0) {
+ if (xsk->xsks_map_fd == -1)
err = -ENOENT;
- xsk_delete_bpf_maps(xsk);
- }
out_map_ids:
free(map_ids);
return err;
}
-static void xsk_clear_bpf_maps(struct xsk_socket *xsk)
-{
- int qid = false;
-
- bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
- bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
-}
-
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
- int qid = true, fd = xsk->fd, err;
-
- err = bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
- if (err)
- goto out;
-
- err = bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, &fd, 0);
- if (err)
- goto out;
-
- return 0;
-out:
- xsk_clear_bpf_maps(xsk);
- return err;
+ return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
+ &xsk->fd, 0);
}
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
@@ -497,26 +460,27 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
return err;
err = xsk_load_xdp_prog(xsk);
- if (err)
- goto out_maps;
+ if (err) {
+ xsk_delete_bpf_maps(xsk);
+ return err;
+ }
} else {
xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
err = xsk_lookup_bpf_maps(xsk);
- if (err)
- goto out_load;
+ if (err) {
+ close(xsk->prog_fd);
+ return err;
+ }
}
err = xsk_set_bpf_maps(xsk);
- if (err)
- goto out_load;
+ if (err) {
+ xsk_delete_bpf_maps(xsk);
+ close(xsk->prog_fd);
+ return err;
+ }
return 0;
-
-out_load:
- close(xsk->prog_fd);
-out_maps:
- xsk_delete_bpf_maps(xsk);
- return err;
}
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
@@ -527,6 +491,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
void *rx_map = NULL, *tx_map = NULL;
struct sockaddr_xdp sxdp = {};
struct xdp_mmap_offsets off;
+ struct xdp_options opts;
struct xsk_socket *xsk;
socklen_t optlen;
int err;
@@ -561,7 +526,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
err = -errno;
goto out_socket;
}
- strncpy(xsk->ifname, ifname, IFNAMSIZ);
+ memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
+ xsk->ifname[IFNAMSIZ - 1] = '\0';
err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
if (err)
@@ -594,11 +560,10 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
}
if (rx) {
- rx_map = xsk_mmap(NULL, off.rx.desc +
- xsk->config.rx_size * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- xsk->fd, XDP_PGOFF_RX_RING);
+ rx_map = mmap(NULL, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_RX_RING);
if (rx_map == MAP_FAILED) {
err = -errno;
goto out_socket;
@@ -608,16 +573,16 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
rx->size = xsk->config.rx_size;
rx->producer = rx_map + off.rx.producer;
rx->consumer = rx_map + off.rx.consumer;
+ rx->flags = rx_map + off.rx.flags;
rx->ring = rx_map + off.rx.desc;
}
xsk->rx = rx;
if (tx) {
- tx_map = xsk_mmap(NULL, off.tx.desc +
- xsk->config.tx_size * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- xsk->fd, XDP_PGOFF_TX_RING);
+ tx_map = mmap(NULL, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_TX_RING);
if (tx_map == MAP_FAILED) {
err = -errno;
goto out_mmap_rx;
@@ -627,6 +592,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
tx->size = xsk->config.tx_size;
tx->producer = tx_map + off.tx.producer;
tx->consumer = tx_map + off.tx.consumer;
+ tx->flags = tx_map + off.tx.flags;
tx->ring = tx_map + off.tx.desc;
tx->cached_cons = xsk->config.tx_size;
}
@@ -643,8 +609,16 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
goto out_mmap_tx;
}
- xsk->qidconf_map_fd = -1;
- xsk->xsks_map_fd = -1;
+ xsk->prog_fd = -1;
+
+ optlen = sizeof(opts);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_OPTIONS, &opts, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_mmap_tx;
+ }
+
+ xsk->zc = opts.flags & XDP_OPTIONS_ZEROCOPY;
if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
err = xsk_setup_xdp_prog(xsk);
@@ -708,8 +682,10 @@ void xsk_socket__delete(struct xsk_socket *xsk)
if (!xsk)
return;
- xsk_clear_bpf_maps(xsk);
- xsk_delete_bpf_maps(xsk);
+ if (xsk->prog_fd != -1) {
+ xsk_delete_bpf_maps(xsk);
+ close(xsk->prog_fd);
+ }
optlen = sizeof(off);
err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index 82ea71a0f3ec..584f6820a639 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -32,6 +32,7 @@ struct name { \
__u32 *producer; \
__u32 *consumer; \
void *ring; \
+ __u32 *flags; \
}
DEFINE_XSK_RING(xsk_ring_prod);
@@ -76,6 +77,11 @@ xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
return &descs[idx & rx->mask];
}
+static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
+{
+ return *r->flags & XDP_RING_NEED_WAKEUP;
+}
+
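xsk_ring_prod__needs_wakeup() reads the flags word mapped into each ring above; when the driver sets XDP_RING_NEED_WAKEUP, user space must kick it with a syscall. A hedged sketch of the conventional TX pattern (kick_tx() is illustrative; assumes xsk.h is included and xsk_fd is a bound AF_XDP socket):

#include <sys/socket.h>

static void kick_tx(int xsk_fd, struct xsk_ring_prod *tx)
{
	/* Only pay the syscall cost when the driver asks for a wakeup. */
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}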
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
__u32 free_entries = r->cached_cons - r->cached_prod;
@@ -162,20 +168,37 @@ static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
return &((char *)umem_area)[addr];
}
+static inline __u64 xsk_umem__extract_addr(__u64 addr)
+{
+ return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
+}
+
+static inline __u64 xsk_umem__extract_offset(__u64 addr)
+{
+ return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+}
+
+static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
+{
+ return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
+}
+
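In unaligned-chunk mode the descriptor address is split: the low bits carry the base address and the bits above XSK_UNALIGNED_BUF_OFFSET_SHIFT carry the offset into the chunk. A worked example, assuming the UAPI values from <linux/if_xdp.h> (shift = 48, mask = low 48 bits):

#include <assert.h>

static void unaligned_addr_demo(void)
{
	/* offset 0x40 within the chunk at base 0x100000 */
	__u64 addr = (0x40ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x100000;

	assert(xsk_umem__extract_addr(addr)       == 0x100000);
	assert(xsk_umem__extract_offset(addr)     == 0x40);
	assert(xsk_umem__add_offset_to_addr(addr) == 0x100040);
}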
LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
-#define XSK_UMEM__DEFAULT_FRAME_SHIFT 11 /* 2048 bytes */
+#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
+#define XSK_UMEM__DEFAULT_FLAGS 0
struct xsk_umem_config {
__u32 fill_size;
__u32 comp_size;
__u32 frame_size;
__u32 frame_headroom;
+ __u32 flags;
};
/* Flags for the libbpf_flags field. */
@@ -195,6 +218,16 @@ LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
+LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
const char *ifname, __u32 queue_id,
struct xsk_umem *umem,
diff --git a/tools/lib/ctype.c b/tools/lib/ctype.c
new file mode 100644
index 000000000000..4d2e05fd3336
--- /dev/null
+++ b/tools/lib/ctype.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/lib/ctype.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/compiler.h>
+
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index a88bd507091e..ac37022e9486 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
*
* Copied from lib/find_bit.c to tools/lib/find_bit.c
@@ -11,11 +12,6 @@
*
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
* size and improve performance, 2015.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 904adb70a4f0..804f145e3113 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/lib/rbtree.c
*/
diff --git a/tools/lib/string.c b/tools/lib/string.c
index 93b3d4b6feac..f2ae1b87c719 100644
--- a/tools/lib/string.c
+++ b/tools/lib/string.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <errno.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/compiler.h>
/**
@@ -106,3 +107,57 @@ size_t __weak strlcpy(char *dest, const char *src, size_t size)
}
return ret;
}
+
+/**
+ * skip_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ */
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+
+/**
+ * strim - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strim(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return skip_spaces(s);
+}
+
+/**
+ * strreplace - Replace all occurrences of character in string.
+ * @s: The string to operate on.
+ * @old: The character being replaced.
+ * @new: The character @old is replaced with.
+ *
+ * Returns pointer to the nul byte at the end of @s.
+ */
+char *strreplace(char *s, char old, char new)
+{
+ for (; *s; ++s)
+ if (*s == old)
+ *s = new;
+ return s;
+}
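
A short usage sketch of the three helpers added above (behavior inferred from the code; note that strim() writes a NUL in place, so call it on a writable buffer):

#include <assert.h>
#include <string.h>

static void string_helpers_demo(void)
{
	char buf[] = "  path/to file  ";
	char *s;

	assert(strcmp(skip_spaces(buf), "path/to file  ") == 0);

	s = strim(buf);                 /* trims both ends in place */
	assert(strcmp(s, "path/to file") == 0);

	strreplace(s, ' ', '_');        /* s is now "path/to_file" */
}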
diff --git a/tools/lib/symbol/kallsyms.c b/tools/lib/symbol/kallsyms.c
index 96d830545bbb..1a7a9f877095 100644
--- a/tools/lib/symbol/kallsyms.c
+++ b/tools/lib/symbol/kallsyms.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <ctype.h>
#include "symbol/kallsyms.h"
#include <stdio.h>
#include <stdlib.h>
@@ -16,6 +15,19 @@ bool kallsyms__is_function(char symbol_type)
return symbol_type == 'T' || symbol_type == 'W';
}
+/*
+ * Parse the leading hex characters of @ptr into @long_val.
+ * Return the number of characters processed.
+ */
+int hex2u64(const char *ptr, u64 *long_val)
+{
+ char *p;
+
+ *long_val = strtoull(ptr, &p, 16);
+
+ return p - ptr;
+}
+
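hex2u64() consumes the leading hex digits and reports how many characters it used, which is how kallsyms lines are scanned field by field. A minimal sketch (the sample line is illustrative):

#include <assert.h>

static void hex2u64_demo(void)
{
	u64 addr;
	int n = hex2u64("ffffffff810002b8 T _stext", &addr);

	/* 16 hex digits consumed; parsing resumes at the type column. */
	assert(n == 16 && addr == 0xffffffff810002b8ULL);
}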
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start))
diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h
index 72ab9870454b..bd988f7b18d4 100644
--- a/tools/lib/symbol/kallsyms.h
+++ b/tools/lib/symbol/kallsyms.h
@@ -18,6 +18,8 @@ static inline u8 kallsyms2elf_binding(char type)
return isupper(type) ? STB_GLOBAL : STB_LOCAL;
}
+int hex2u64(const char *ptr, u64 *long_val);
+
u8 kallsyms2elf_type(char type);
bool kallsyms__is_function(char symbol_type);
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 3292c290654f..a39cdd0d890d 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -62,15 +62,15 @@ set_plugin_dir := 1
# Set plugin_dir to the preferred global plugin location
# If we install under $HOME directory we go under
-# $(HOME)/.traceevent/plugins
+# $(HOME)/.local/lib/traceevent/plugins
#
# We don't set PLUGIN_DIR in case we install under $HOME
# directory, because by default the code looks under:
-# $(HOME)/.traceevent/plugins by default.
+# $(HOME)/.local/lib/traceevent/plugins by default.
#
ifeq ($(plugin_dir),)
ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.traceevent/plugins
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
set_plugin_dir := 0
else
override plugin_dir = $(libdir)/traceevent/plugins
@@ -266,8 +266,8 @@ endef
define do_generate_dynamic_list_file
symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
- xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
- if [ "$$symbol_type" = "U W w" ];then \
+ xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+ if [ "$$symbol_type" = "U W" ];then \
(echo '{'; \
$(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
echo '};'; \
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index 988587840c80..4faf52a65791 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -303,33 +303,6 @@ void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian)
}
/**
- * tep_is_latency_format - get if the latency output format is configured
- * @tep: a handle to the tep_handle
- *
- * This returns true if the latency output format is configured
- * If @tep is NULL, false is returned.
- */
-bool tep_is_latency_format(struct tep_handle *tep)
-{
- if (tep)
- return (tep->latency_format);
- return false;
-}
-
-/**
- * tep_set_latency_format - set the latency output format
- * @tep: a handle to the tep_handle
- * @lat: non zero for latency output format
- *
- * This sets the latency output format
- */
-void tep_set_latency_format(struct tep_handle *tep, int lat)
-{
- if (tep)
- tep->latency_format = lat;
-}
-
-/**
* tep_is_old_format - get if an old kernel is used
* @tep: a handle to the tep_handle
*
@@ -345,19 +318,6 @@ bool tep_is_old_format(struct tep_handle *tep)
}
/**
- * tep_set_print_raw - set a flag to force print in raw format
- * @tep: a handle to the tep_handle
- * @print_raw: the new value of the print_raw flag
- *
- * This sets a flag to force print in raw format
- */
-void tep_set_print_raw(struct tep_handle *tep, int print_raw)
-{
- if (tep)
- tep->print_raw = print_raw;
-}
-
-/**
* tep_set_test_filters - set a flag to test a filter string
* @tep: a handle to the tep_handle
* @test_filters: the new value of the test_filters flag
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index 09aa142f7fdd..cee469803a34 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -28,8 +28,6 @@ struct tep_handle {
enum tep_endian file_bigendian;
enum tep_endian host_bigendian;
- int latency_format;
-
int old_format;
int cpus;
@@ -70,8 +68,6 @@ struct tep_handle {
int ld_offset;
int ld_size;
- int print_raw;
-
int test_filters;
int flags;
@@ -85,8 +81,6 @@ struct tep_handle {
/* cache */
struct tep_event *last_event;
-
- char *trace_clock;
};
void tep_free_event(struct tep_event *event);
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index b36b536a9fcb..bb22238debfe 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -142,6 +142,25 @@ static int cmdline_cmp(const void *a, const void *b)
return 0;
}
+/* Looking for where to place the key */
+static int cmdline_slot_cmp(const void *a, const void *b)
+{
+ const struct tep_cmdline *ca = a;
+ const struct tep_cmdline *cb = b;
+ const struct tep_cmdline *cb1 = cb + 1;
+
+ if (ca->pid < cb->pid)
+ return -1;
+
+ if (ca->pid > cb->pid) {
+ if (ca->pid <= cb1->pid)
+ return 0;
+ return 1;
+ }
+
+ return 0;
+}
+
struct cmdline_list {
struct cmdline_list *next;
char *comm;
@@ -239,6 +258,7 @@ static int add_new_comm(struct tep_handle *tep,
struct tep_cmdline *cmdline;
struct tep_cmdline key;
char *new_comm;
+ int cnt;
if (!pid)
return 0;
@@ -269,21 +289,43 @@ static int add_new_comm(struct tep_handle *tep,
errno = ENOMEM;
return -1;
}
+ tep->cmdlines = cmdlines;
- cmdlines[tep->cmdline_count].comm = strdup(comm);
- if (!cmdlines[tep->cmdline_count].comm) {
- free(cmdlines);
+ key.comm = strdup(comm);
+ if (!key.comm) {
errno = ENOMEM;
return -1;
}
- cmdlines[tep->cmdline_count].pid = pid;
-
- if (cmdlines[tep->cmdline_count].comm)
+ if (!tep->cmdline_count) {
+ /* no entries yet */
+ tep->cmdlines[0] = key;
tep->cmdline_count++;
+ return 0;
+ }
- qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
- tep->cmdlines = cmdlines;
+ /* Now find where we want to store the new cmdline */
+ cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count - 1,
+ sizeof(*tep->cmdlines), cmdline_slot_cmp);
+
+ cnt = tep->cmdline_count;
+ if (cmdline) {
+ /* cmdline points to the one before the spot we want */
+ cmdline++;
+ cnt -= cmdline - tep->cmdlines;
+
+ } else {
+ /* The new entry is either before or after the list */
+ if (key.pid > tep->cmdlines[tep->cmdline_count - 1].pid) {
+ tep->cmdlines[tep->cmdline_count++] = key;
+ return 0;
+ }
+ cmdline = &tep->cmdlines[0];
+ }
+ memmove(cmdline + 1, cmdline, (cnt * sizeof(*cmdline)));
+ *cmdline = key;
+
+ tep->cmdline_count++;
return 0;
}
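The rewrite above replaces the append-then-qsort pattern with an incremental sorted insert: bsearch() runs over count - 1 adjacent pairs with a "slot" comparator that matches when the key falls between two neighbors, and memmove() opens the gap. A generic sketch of the technique with illustrative names (assumes count >= 1; an empty array is handled separately, as in the code above):

#include <stdlib.h>
#include <string.h>

/* Match when the key sorts between cur[0] and cur[1]. */
static int slot_cmp(const void *a, const void *b)
{
	const int *key = a, *cur = b;

	if (*key < cur[0])
		return -1;
	if (*key > cur[1])
		return 1;
	return 0;
}

/* Insert val into sorted arr[0..count-1]; caller guarantees capacity. */
static void sorted_insert(int *arr, int count, int val)
{
	int *slot = bsearch(&val, arr, count - 1, sizeof(*arr), slot_cmp);
	int *pos;

	if (slot)
		pos = slot + 1;          /* slot points just before the gap */
	else if (val > arr[count - 1])
		pos = arr + count;       /* past the end: plain append */
	else
		pos = arr;               /* before the whole list */

	memmove(pos + 1, pos, (arr + count - pos) * sizeof(*arr));
	*pos = val;
}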
@@ -351,16 +393,6 @@ int tep_override_comm(struct tep_handle *tep, const char *comm, int pid)
return _tep_register_comm(tep, comm, pid, true);
}
-int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock)
-{
- tep->trace_clock = strdup(trace_clock);
- if (!tep->trace_clock) {
- errno = ENOMEM;
- return -1;
- }
- return 0;
-}
-
struct func_map {
unsigned long long addr;
char *func;
@@ -5170,24 +5202,20 @@ out_failed:
}
}
-/**
- * tep_data_latency_format - parse the data for the latency format
- * @tep: a handle to the trace event parser context
- * @s: the trace_seq to write to
- * @record: the record to read from
- *
+/*
* This parses out the Latency format (interrupts disabled,
* need rescheduling, in hard/soft interrupt, preempt count
* and lock depth) and places it into the trace_seq.
*/
-void tep_data_latency_format(struct tep_handle *tep,
- struct trace_seq *s, struct tep_record *record)
+static void data_latency_format(struct tep_handle *tep, struct trace_seq *s,
+ char *format, struct tep_record *record)
{
static int check_lock_depth = 1;
static int check_migrate_disable = 1;
static int lock_depth_exists;
static int migrate_disable_exists;
unsigned int lat_flags;
+ struct trace_seq sq;
unsigned int pc;
int lock_depth = 0;
int migrate_disable = 0;
@@ -5195,6 +5223,7 @@ void tep_data_latency_format(struct tep_handle *tep,
int softirq;
void *data = record->data;
+ trace_seq_init(&sq);
lat_flags = parse_common_flags(tep, data);
pc = parse_common_pc(tep, data);
/* lock_depth may not always exist */
@@ -5222,7 +5251,7 @@ void tep_data_latency_format(struct tep_handle *tep,
hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
- trace_seq_printf(s, "%c%c%c",
+ trace_seq_printf(&sq, "%c%c%c",
(lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
(lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
'X' : '.',
@@ -5232,24 +5261,32 @@ void tep_data_latency_format(struct tep_handle *tep,
hardirq ? 'h' : softirq ? 's' : '.');
if (pc)
- trace_seq_printf(s, "%x", pc);
+ trace_seq_printf(&sq, "%x", pc);
else
- trace_seq_putc(s, '.');
+ trace_seq_printf(&sq, ".");
if (migrate_disable_exists) {
if (migrate_disable < 0)
- trace_seq_putc(s, '.');
+ trace_seq_printf(&sq, ".");
else
- trace_seq_printf(s, "%d", migrate_disable);
+ trace_seq_printf(&sq, "%d", migrate_disable);
}
if (lock_depth_exists) {
if (lock_depth < 0)
- trace_seq_putc(s, '.');
+ trace_seq_printf(&sq, ".");
else
- trace_seq_printf(s, "%d", lock_depth);
+ trace_seq_printf(&sq, "%d", lock_depth);
+ }
+
+ if (sq.state == TRACE_SEQ__MEM_ALLOC_FAILED) {
+ s->state = TRACE_SEQ__MEM_ALLOC_FAILED;
+ return;
}
+ trace_seq_terminate(&sq);
+ trace_seq_puts(s, sq.buffer);
+ trace_seq_destroy(&sq);
trace_seq_terminate(s);
}
@@ -5410,21 +5447,16 @@ int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline)
return cmdline->pid;
}
-/**
- * tep_event_info - parse the data into the print format
- * @s: the trace_seq to write to
- * @event: the handle to the event
- * @record: the record to read from
- *
+/*
* This parses the raw @data using the given @event information and
* writes the print format into the trace_seq.
*/
-void tep_event_info(struct trace_seq *s, struct tep_event *event,
- struct tep_record *record)
+static void print_event_info(struct trace_seq *s, char *format, bool raw,
+ struct tep_event *event, struct tep_record *record)
{
int print_pretty = 1;
- if (event->tep->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
+ if (raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
tep_print_fields(s, record->data, record->size, event);
else {
@@ -5439,20 +5471,6 @@ void tep_event_info(struct trace_seq *s, struct tep_event *event,
trace_seq_terminate(s);
}
-static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
-{
- if (!trace_clock || !use_trace_clock)
- return true;
-
- if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global")
- || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf")
- || !strncmp(trace_clock, "mono", 4))
- return true;
-
- /* trace_clock is setting in tsc or counter mode */
- return false;
-}
-
/**
* tep_find_event_by_record - return the event from a given record
* @tep: a handle to the trace event parser context
@@ -5476,129 +5494,195 @@ tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record)
return tep_find_event(tep, type);
}
-/**
- * tep_print_event_task - Write the event task comm, pid and CPU
- * @tep: a handle to the trace event parser context
- * @s: the trace_seq to write to
- * @event: the handle to the record's event
- * @record: The record to get the event from
- *
- * Writes the tasks comm, pid and CPU to @s.
+/*
+ * Writes the timestamp of the record into @s. Time divisor and precision can be
+ * specified as part of the printf @format string. Example:
+ * "%3.1000d" - divide the time by 1000 and print the first 3 digits
+ * before the dot. Thus, the timestamp "123456000" will be printed as
+ * "123.456"
*/
-void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
- struct tep_event *event,
- struct tep_record *record)
+static void print_event_time(struct tep_handle *tep, struct trace_seq *s,
+ char *format, struct tep_event *event,
+ struct tep_record *record)
+{
+ unsigned long long time;
+ char *divstr;
+ int prec = 0, pr;
+ int div = 0;
+ int p10 = 1;
+
+ if (isdigit(*(format + 1)))
+ prec = atoi(format + 1);
+ divstr = strchr(format, '.');
+ if (divstr && isdigit(*(divstr + 1)))
+ div = atoi(divstr + 1);
+ time = record->ts;
+ if (div)
+ time /= div;
+ pr = prec;
+ while (pr--)
+ p10 *= 10;
+
+ if (p10 > 1 && p10 < time)
+ trace_seq_printf(s, "%5llu.%0*llu", time / p10, prec, time % p10);
+ else
+ trace_seq_printf(s, "%12llu\n", time);
+}
+
+struct print_event_type {
+ enum {
+ EVENT_TYPE_INT = 1,
+ EVENT_TYPE_STRING,
+ EVENT_TYPE_UNKNOWN,
+ } type;
+ char format[32];
+};
+
+static void print_string(struct tep_handle *tep, struct trace_seq *s,
+ struct tep_record *record, struct tep_event *event,
+ const char *arg, struct print_event_type *type)
{
- void *data = record->data;
const char *comm;
int pid;
- pid = parse_common_pid(tep, data);
- comm = find_cmdline(tep, pid);
+ if (strncmp(arg, TEP_PRINT_LATENCY, strlen(TEP_PRINT_LATENCY)) == 0) {
+ data_latency_format(tep, s, type->format, record);
+ } else if (strncmp(arg, TEP_PRINT_COMM, strlen(TEP_PRINT_COMM)) == 0) {
+ pid = parse_common_pid(tep, record->data);
+ comm = find_cmdline(tep, pid);
+ trace_seq_printf(s, type->format, comm);
+ } else if (strncmp(arg, TEP_PRINT_INFO_RAW, strlen(TEP_PRINT_INFO_RAW)) == 0) {
+ print_event_info(s, type->format, true, event, record);
+ } else if (strncmp(arg, TEP_PRINT_INFO, strlen(TEP_PRINT_INFO)) == 0) {
+ print_event_info(s, type->format, false, event, record);
+ } else if (strncmp(arg, TEP_PRINT_NAME, strlen(TEP_PRINT_NAME)) == 0) {
+ trace_seq_printf(s, type->format, event->name);
+ } else {
+ trace_seq_printf(s, "[UNKNOWN TEP TYPE %s]", arg);
+ }
- if (tep->latency_format)
- trace_seq_printf(s, "%8.8s-%-5d %3d", comm, pid, record->cpu);
- else
- trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu);
}
-/**
- * tep_print_event_time - Write the event timestamp
- * @tep: a handle to the trace event parser context
- * @s: the trace_seq to write to
- * @event: the handle to the record's event
- * @record: The record to get the event from
- * @use_trace_clock: Set to parse according to the @tep->trace_clock
- *
- * Writes the timestamp of the record into @s.
- */
-void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
- struct tep_event *event,
- struct tep_record *record,
- bool use_trace_clock)
+static void print_int(struct tep_handle *tep, struct trace_seq *s,
+ struct tep_record *record, struct tep_event *event,
+ int arg, struct print_event_type *type)
{
- unsigned long secs;
- unsigned long usecs;
- unsigned long nsecs;
- int p;
- bool use_usec_format;
+ int param;
- use_usec_format = is_timestamp_in_us(tep->trace_clock, use_trace_clock);
- if (use_usec_format) {
- secs = record->ts / NSEC_PER_SEC;
- nsecs = record->ts - secs * NSEC_PER_SEC;
+ switch (arg) {
+ case TEP_PRINT_CPU:
+ param = record->cpu;
+ break;
+ case TEP_PRINT_PID:
+ param = parse_common_pid(tep, record->data);
+ break;
+ case TEP_PRINT_TIME:
+ return print_event_time(tep, s, type->format, event, record);
+ default:
+ return;
}
+ trace_seq_printf(s, type->format, param);
+}
- if (tep->latency_format) {
- tep_data_latency_format(tep, s, record);
- }
+static int tep_print_event_param_type(char *format,
+ struct print_event_type *type)
+{
+ char *str = format + 1;
+ int i = 1;
- if (use_usec_format) {
- if (tep->flags & TEP_NSEC_OUTPUT) {
- usecs = nsecs;
- p = 9;
- } else {
- usecs = (nsecs + 500) / NSEC_PER_USEC;
- /* To avoid usecs larger than 1 sec */
- if (usecs >= USEC_PER_SEC) {
- usecs -= USEC_PER_SEC;
- secs++;
- }
- p = 6;
+ type->type = EVENT_TYPE_UNKNOWN;
+ while (*str) {
+ switch (*str) {
+ case 'd':
+ case 'u':
+ case 'i':
+ case 'x':
+ case 'X':
+ case 'o':
+ type->type = EVENT_TYPE_INT;
+ break;
+ case 's':
+ type->type = EVENT_TYPE_STRING;
+ break;
}
-
- trace_seq_printf(s, " %5lu.%0*lu:", secs, p, usecs);
- } else
- trace_seq_printf(s, " %12llu:", record->ts);
+ str++;
+ i++;
+ if (type->type != EVENT_TYPE_UNKNOWN)
+ break;
+ }
+ memset(type->format, 0, 32);
+ memcpy(type->format, format, i < 32 ? i : 31);
+ return i;
}
/**
- * tep_print_event_data - Write the event data section
+ * tep_print_event - Write various event information
* @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
- * @event: the handle to the record's event
* @record: The record to get the event from
- *
- * Writes the parsing of the record's data to @s.
+ * @format: a printf format string. Supported event fields:
+ * TEP_PRINT_PID, "%d" - event PID
+ * TEP_PRINT_CPU, "%d" - event CPU
+ * TEP_PRINT_COMM, "%s" - event command string
+ * TEP_PRINT_NAME, "%s" - event name
+ * TEP_PRINT_LATENCY, "%s" - event latency
+ * TEP_PRINT_TIME, %d - event time stamp. A divisor and precision
+ * can be specified as part of this format string:
+ * "%precision.divisord". Example:
+ * "%3.1000d" - divide the time by 1000 and print the first
+ * 3 digits before the dot. Thus, the time stamp
+ * "123456000" will be printed as "123.456"
+ * TEP_PRINT_INFO, "%s" - event information. If any width is specified in
+ * the format string, the event information will be printed
+ * in raw format.
+ * Writes the specified event information into @s.
*/
-void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
- struct tep_event *event,
- struct tep_record *record)
-{
- static const char *spaces = " "; /* 20 spaces */
- int len;
-
- trace_seq_printf(s, " %s: ", event->name);
-
- /* Space out the event names evenly. */
- len = strlen(event->name);
- if (len < 20)
- trace_seq_printf(s, "%.*s", 20 - len, spaces);
-
- tep_event_info(s, event, record);
-}
-
void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
- struct tep_record *record, bool use_trace_clock)
-{
+ struct tep_record *record, const char *fmt, ...)
+{
+ struct print_event_type type;
+ char *format = strdup(fmt);
+ char *current = format;
+ char *str = format;
+ int offset;
+ va_list args;
struct tep_event *event;
- event = tep_find_event_by_record(tep, record);
- if (!event) {
- int i;
- int type = trace_parse_common_type(tep, record->data);
-
- do_warning("ug! no event found for type %d", type);
- trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
- for (i = 0; i < record->size; i++)
- trace_seq_printf(s, " %02x",
- ((unsigned char *)record->data)[i]);
+ if (!format)
return;
- }
- tep_print_event_task(tep, s, event, record);
- tep_print_event_time(tep, s, event, record, use_trace_clock);
- tep_print_event_data(tep, s, event, record);
+ event = tep_find_event_by_record(tep, record);
+ va_start(args, fmt);
+ while (*current) {
+ current = strchr(str, '%');
+ if (!current) {
+ trace_seq_puts(s, str);
+ break;
+ }
+ memset(&type, 0, sizeof(type));
+ offset = tep_print_event_param_type(current, &type);
+ *current = '\0';
+ trace_seq_puts(s, str);
+ current += offset;
+ switch (type.type) {
+ case EVENT_TYPE_STRING:
+ print_string(tep, s, record, event,
+ va_arg(args, char*), &type);
+ break;
+ case EVENT_TYPE_INT:
+ print_int(tep, s, record, event,
+ va_arg(args, int), &type);
+ break;
+ case EVENT_TYPE_UNKNOWN:
+ default:
+ trace_seq_printf(s, "[UNKNOWN TYPE]");
+ break;
+ }
+ str = current;
+
+ }
+ va_end(args);
+ free(format);
}
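Taken together, the new entry point replaces the fixed task/time/data pipeline with one printf-style call: each conversion in @fmt consumes a TEP_PRINT_* selector from the vararg list. A hedged usage sketch reproducing roughly the old default layout; tep, record and the surrounding setup are assumed to exist already:

struct trace_seq seq;

trace_seq_init(&seq);
tep_print_event(tep, &seq, record,
		"%16s-%-5d [%03d] %6.1000d: %s: %s",
		TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU,
		TEP_PRINT_TIME, TEP_PRINT_NAME, TEP_PRINT_INFO);
trace_seq_do_printf(&seq);
trace_seq_destroy(&seq);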
static int events_id_cmp(const void *a, const void *b)
@@ -6963,7 +7047,6 @@ void tep_free(struct tep_handle *tep)
free_handler(handle);
}
- free(tep->trace_clock);
free(tep->events);
free(tep->sort_events);
free(tep->func_resolver);
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 642f68ab5fb2..d438ee44289f 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -435,25 +435,24 @@ int tep_set_function_resolver(struct tep_handle *tep,
void tep_reset_function_resolver(struct tep_handle *tep);
int tep_register_comm(struct tep_handle *tep, const char *comm, int pid);
int tep_override_comm(struct tep_handle *tep, const char *comm, int pid);
-int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock);
int tep_register_function(struct tep_handle *tep, char *name,
unsigned long long addr, char *mod);
int tep_register_print_string(struct tep_handle *tep, const char *fmt,
unsigned long long addr);
bool tep_is_pid_registered(struct tep_handle *tep, int pid);
-void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
- struct tep_event *event,
- struct tep_record *record);
-void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
- struct tep_event *event,
- struct tep_record *record,
- bool use_trace_clock);
-void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
- struct tep_event *event,
- struct tep_record *record);
+#define TEP_PRINT_INFO "INFO"
+#define TEP_PRINT_INFO_RAW "INFO_RAW"
+#define TEP_PRINT_COMM "COMM"
+#define TEP_PRINT_LATENCY "LATENCY"
+#define TEP_PRINT_NAME "NAME"
+#define TEP_PRINT_PID 1U
+#define TEP_PRINT_TIME 2U
+#define TEP_PRINT_CPU 3U
+
void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
- struct tep_record *record, bool use_trace_clock);
+ struct tep_record *record, const char *fmt, ...)
+ __attribute__ ((format (printf, 4, 5)));
int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
int long_size);
@@ -525,8 +524,6 @@ tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name
struct tep_event *
tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record);
-void tep_data_latency_format(struct tep_handle *tep,
- struct trace_seq *s, struct tep_record *record);
int tep_data_type(struct tep_handle *tep, struct tep_record *rec);
int tep_data_pid(struct tep_handle *tep, struct tep_record *rec);
int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec);
@@ -541,8 +538,6 @@ void tep_print_field(struct trace_seq *s, void *data,
struct tep_format_field *field);
void tep_print_fields(struct trace_seq *s, void *data,
int size __maybe_unused, struct tep_event *event);
-void tep_event_info(struct trace_seq *s, struct tep_event *event,
- struct tep_record *record);
int tep_strerror(struct tep_handle *tep, enum tep_errno errnum,
char *buf, size_t buflen);
@@ -566,12 +561,9 @@ bool tep_is_file_bigendian(struct tep_handle *tep);
void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian);
bool tep_is_local_bigendian(struct tep_handle *tep);
void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian);
-bool tep_is_latency_format(struct tep_handle *tep);
-void tep_set_latency_format(struct tep_handle *tep, int lat);
int tep_get_header_page_size(struct tep_handle *tep);
int tep_get_header_timestamp_size(struct tep_handle *tep);
bool tep_is_old_format(struct tep_handle *tep);
-void tep_set_print_raw(struct tep_handle *tep, int print_raw);
void tep_set_test_filters(struct tep_handle *tep, int test_filters);
struct tep_handle *tep_alloc(void);
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index 8ca28de9337a..e1f7ddd5a6cf 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -18,7 +18,7 @@
#include "event-utils.h"
#include "trace-seq.h"
-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
static struct registered_plugin_options {
struct registered_plugin_options *next;
diff --git a/tools/lib/vsprintf.c b/tools/lib/vsprintf.c
index e08ee147eab4..8780b4cdab21 100644
--- a/tools/lib/vsprintf.c
+++ b/tools/lib/vsprintf.c
@@ -23,3 +23,22 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...)
return (i >= ssize) ? (ssize - 1) : i;
}
+
+int scnprintf_pad(char * buf, size_t size, const char * fmt, ...)
+{
+ ssize_t ssize = size;
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ if (i < (int) size) {
+ for (; i < (int) size; i++)
+ buf[i] = ' ';
+ buf[i] = 0x0;
+ }
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
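scnprintf_pad() behaves like scnprintf() but space-fills the rest of the buffer, which is handy for fixed-width column output. A minimal sketch:

char col[8];

/* col ends up as "42" followed by spaces, NUL-terminated at the last
 * byte: fixed-width output regardless of the value printed. */
scnprintf_pad(col, sizeof(col), "%d", 42);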
diff --git a/tools/lib/zalloc.c b/tools/lib/zalloc.c
new file mode 100644
index 000000000000..9c856d59f56e
--- /dev/null
+++ b/tools/lib/zalloc.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+#include <stdlib.h>
+#include <linux/zalloc.h>
+
+void *zalloc(size_t size)
+{
+ return calloc(1, size);
+}
+
+void __zfree(void **ptr)
+{
+ free(*ptr);
+ *ptr = NULL;
+}
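
zalloc() pairs with the zfree() wrapper; assuming linux/zalloc.h defines zfree(ptr) as __zfree((void **)(ptr)), the usual pattern is:

#include <linux/zalloc.h>

struct item { int id; char *name; };   /* illustrative type */

static void zalloc_demo(void)
{
	struct item *it = zalloc(sizeof(*it));  /* all fields start zeroed */

	if (!it)
		return;
	/* ... use it ... */
	zfree(&it);                             /* frees and NULLs it */
}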