path: root/tools/lib/bpf/bpf.c
author     David S. Miller <davem@davemloft.net>  2018-12-10 18:00:43 -0800
committer  David S. Miller <davem@davemloft.net>  2018-12-10 18:00:43 -0800
commit     addb0679839a1f74da6ec742137558be244dd0e9 (patch)
tree       9de8daf0caa3d3f43184ad4b4e763306a445a8ce /tools/lib/bpf/bpf.c
parent     neighbor: gc_list changes should be protected by table lock (diff)
parent     Merge branch 'rename-info_cnt-to-nr_info' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-12-11

The following pull-request contains BPF updates for your *net-next* tree.

It has three minor merge conflicts, resolutions:

1) tools/testing/selftests/bpf/test_verifier.c

   Take first chunk with alignment_prevented_execution.

2) net/core/filter.c

   [...]
   case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
   case bpf_ctx_range(struct __sk_buff, wire_len):
         return false;
   [...]

3) include/uapi/linux/bpf.h

   Take the second chunk for the two cases each.

The main changes are:

1) Add support for BPF line info via BTF and extend libbpf as well as
   bpftool's program dump to annotate output with BPF C code to
   facilitate debugging and introspection, from Martin.

2) Add support for BPF_ALU | BPF_ARSH | BPF_{K,X} in interpreter and
   all JIT backends, from Jiong.

3) Improve BPF test coverage on archs with no efficient unaligned
   access by adding an "any alignment" flag to the BPF program load to
   forcefully disable verifier alignment checks, from David.

4) Add a new bpf_prog_test_run_xattr() API to libbpf which allows for
   proper use of BPF_PROG_TEST_RUN with data_out, from Lorenz.

5) Extend tc BPF programs to use a new __sk_buff field called wire_len
   for more accurate accounting of packets going to wire, from Petar.

6) Improve bpftool to allow dumping the trace pipe from it and add
   several improvements in bash completion and map/prog dump, from
   Quentin.

7) Optimize arm64 BPF JIT to always emit movn/movk/movk sequence for
   kernel addresses and add a dedicated BPF JIT backend allocator,
   from Ard.

8) Add a BPF helper function for IR remotes to report mouse movements,
   from Sean.

9) Various cleanups in BPF prog dump e.g. to make UAPI bpf_prog_info
   member naming consistent with existing conventions, from Yonghong
   and Song.

10) Misc cleanups and improvements in allowing to pass interface name
    via cmdline for xdp1 BPF example, from Matteo.

11) Fix a potential segfault in BPF sample loader's kprobes handling,
    from Daniel T.

12) Fix SPDX license in libbpf's README.rst, from Andrey.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'tools/lib/bpf/bpf.c')
-rw-r--r--  tools/lib/bpf/bpf.c  124
1 file changed, 89 insertions(+), 35 deletions(-)
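
Change 4 of the merge message is the piece of this diff that touches the test-run path: the last hunk below adds bpf_prog_test_run_xattr(), which drives BPF_PROG_TEST_RUN through a single attribute struct so data_out can be used with a caller-supplied capacity. A minimal usage sketch, relying only on the bpf_prog_test_run_attr members referenced in that hunk (prog_fd, repeat, data_in/data_size_in, data_out/data_size_out, retval, duration); the prog_fd and packet buffers are placeholders:

#include <stdio.h>
#include <bpf/bpf.h>	/* tools/lib/bpf/bpf.h */

/* Hypothetical helper: run prog_fd once over a zeroed 128-byte packet
 * and print the verdict. data_size_out is both an input (capacity of
 * "out") and an output (bytes the kernel actually wrote back). */
static int run_once(int prog_fd)
{
	char in[128] = {}, out[128];
	struct bpf_prog_test_run_attr tattr = {
		.prog_fd	= prog_fd,
		.repeat		= 1,
		.data_in	= in,
		.data_size_in	= sizeof(in),
		.data_out	= out,
		.data_size_out	= sizeof(out),
	};
	int err = bpf_prog_test_run_xattr(&tattr);

	if (err)
		return err;
	printf("retval=%u, %u bytes out, %u ns/run\n",
	       tattr.retval, tattr.data_size_out, tattr.duration);
	return 0;
}

The -EINVAL sanity check (data_size_out set while data_out is NULL) is visible in the new function; on success, retval, duration and data_size_out are updated in place.
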
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index ce1822194590..3caaa3428774 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -173,11 +173,36 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
-1);
}
+static void *
+alloc_zero_tailing_info(const void *orecord, __u32 cnt,
+ __u32 actual_rec_size, __u32 expected_rec_size)
+{
+ __u64 info_len = actual_rec_size * cnt;
+ void *info, *nrecord;
+ int i;
+
+ info = malloc(info_len);
+ if (!info)
+ return NULL;
+
+ /* zero out bytes kernel does not understand */
+ nrecord = info;
+ for (i = 0; i < cnt; i++) {
+ memcpy(nrecord, orecord, expected_rec_size);
+ memset(nrecord + expected_rec_size, 0,
+ actual_rec_size - expected_rec_size);
+ orecord += actual_rec_size;
+ nrecord += actual_rec_size;
+ }
+
+ return info;
+}
+
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz)
{
+ void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
- void *finfo = NULL;
__u32 name_len;
int fd;
@@ -201,53 +226,58 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
attr.func_info = ptr_to_u64(load_attr->func_info);
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_cnt = load_attr->line_info_cnt;
+ attr.line_info = ptr_to_u64(load_attr->line_info);
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
- if (fd >= 0 || !log_buf || !log_buf_sz)
+ if (fd >= 0)
return fd;
/* After bpf_prog_load, the kernel may modify certain attributes
* to give user space a hint how to deal with loading failure.
* Check to see whether we can make some changes and load again.
*/
- if (errno == E2BIG && attr.func_info_cnt &&
- attr.func_info_rec_size < load_attr->func_info_rec_size) {
- __u32 actual_rec_size = load_attr->func_info_rec_size;
- __u32 expected_rec_size = attr.func_info_rec_size;
- __u32 finfo_cnt = load_attr->func_info_cnt;
- __u64 finfo_len = actual_rec_size * finfo_cnt;
- const void *orecord;
- void *nrecord;
- int i;
-
- finfo = malloc(finfo_len);
- if (!finfo)
- /* further try with log buffer won't help */
- return fd;
-
- /* zero out bytes kernel does not understand */
- orecord = load_attr->func_info;
- nrecord = finfo;
- for (i = 0; i < load_attr->func_info_cnt; i++) {
- memcpy(nrecord, orecord, expected_rec_size);
- memset(nrecord + expected_rec_size, 0,
- actual_rec_size - expected_rec_size);
- orecord += actual_rec_size;
- nrecord += actual_rec_size;
+ while (errno == E2BIG && (!finfo || !linfo)) {
+ if (!finfo && attr.func_info_cnt &&
+ attr.func_info_rec_size < load_attr->func_info_rec_size) {
+ /* try with corrected func info records */
+ finfo = alloc_zero_tailing_info(load_attr->func_info,
+ load_attr->func_info_cnt,
+ load_attr->func_info_rec_size,
+ attr.func_info_rec_size);
+ if (!finfo)
+ goto done;
+
+ attr.func_info = ptr_to_u64(finfo);
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ } else if (!linfo && attr.line_info_cnt &&
+ attr.line_info_rec_size <
+ load_attr->line_info_rec_size) {
+ linfo = alloc_zero_tailing_info(load_attr->line_info,
+ load_attr->line_info_cnt,
+ load_attr->line_info_rec_size,
+ attr.line_info_rec_size);
+ if (!linfo)
+ goto done;
+
+ attr.line_info = ptr_to_u64(linfo);
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ } else {
+ break;
}
- /* try with corrected func info records */
- attr.func_info = ptr_to_u64(finfo);
- attr.func_info_rec_size = load_attr->func_info_rec_size;
-
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
- if (fd >= 0 || !log_buf || !log_buf_sz)
+ if (fd >= 0)
goto done;
}
+ if (!log_buf || !log_buf_sz)
+ goto done;
+
/* Try again with log */
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_buf_sz;
@@ -256,6 +286,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
done:
free(finfo);
+ free(linfo);
return fd;
}
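
For callers, the E2BIG handling above is transparent: hand bpf_load_program_xattr() the func_info/line_info records taken from the object's .BTF.ext section, and if the running kernel only understands a smaller record format, libbpf zero-pads the tail bytes it does not know about and retries. A rough sketch, assuming the bpf_load_program_attr fields used in the hunks above plus prog_btf_fd; btf_fd and the record arrays are placeholders the caller would obtain from its BTF loading code:

#include <bpf/bpf.h>	/* tools/lib/bpf/bpf.h */

/* Hypothetical wrapper: load a program together with its BTF-described
 * func/line info so bpftool and the kernel can annotate dumps with the
 * original C source lines. */
static int load_annotated(const struct bpf_insn *insns, size_t insn_cnt,
			  int btf_fd,
			  const void *func_recs, __u32 func_rec_sz, __u32 func_cnt,
			  const void *line_recs, __u32 line_rec_sz, __u32 line_cnt,
			  char *log_buf, size_t log_sz)
{
	struct bpf_load_program_attr attr = {};

	attr.prog_type		= BPF_PROG_TYPE_SCHED_CLS;
	attr.name		= "annotated";
	attr.insns		= insns;
	attr.insns_cnt		= insn_cnt;
	attr.license		= "GPL";
	attr.prog_btf_fd	= btf_fd;	/* BTF object the records refer to */
	attr.func_info		= func_recs;
	attr.func_info_rec_size	= func_rec_sz;
	attr.func_info_cnt	= func_cnt;
	attr.line_info		= line_recs;
	attr.line_info_rec_size	= line_rec_sz;
	attr.line_info_cnt	= line_cnt;

	/* If an older kernel reports E2BIG with a smaller supported record
	 * size, the retry loop above resubmits with the unknown tail of each
	 * record zeroed out; no action is needed here. */
	return bpf_load_program_xattr(&attr, log_buf, log_sz);
}
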
@@ -279,9 +310,9 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
}
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, int strict_alignment,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz, int log_level)
+ size_t insns_cnt, __u32 prog_flags, const char *license,
+ __u32 kern_version, char *log_buf, size_t log_buf_sz,
+ int log_level)
{
union bpf_attr attr;
@@ -295,7 +326,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.log_level = log_level;
log_buf[0] = 0;
attr.kern_version = kern_version;
- attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
+ attr.prog_flags = prog_flags;
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
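
With the third parameter generalized from a strict_alignment bool to a prog_flags mask, callers can now pass the "any alignment" flag from change 3 of the merge message (BPF_F_ANY_ALIGNMENT) to disable verifier alignment checks entirely; BPF_F_STRICT_ALIGNMENT keeps the old strict behaviour and 0 the default. A small sketch; insns and the log buffer are the caller's, and log_buf must be non-NULL because bpf_verify_program() unconditionally writes log_buf[0]:

#include <linux/bpf.h>
#include <bpf/bpf.h>	/* tools/lib/bpf/bpf.h */

/* Hypothetical test helper: verify the caller's instructions with
 * alignment checking disabled, as the selftests do on architectures
 * without efficient unaligned access. */
static int verify_any_alignment(const struct bpf_insn *insns, size_t insn_cnt,
				char *log_buf, size_t log_sz)
{
	return bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, insns, insn_cnt,
				  BPF_F_ANY_ALIGNMENT, "GPL", 0,
				  log_buf, log_sz, 1);
}
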
@@ -463,6 +494,29 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
return ret;
}
+int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
+{
+ union bpf_attr attr;
+ int ret;
+
+ if (!test_attr->data_out && test_attr->data_size_out > 0)
+ return -EINVAL;
+
+ bzero(&attr, sizeof(attr));
+ attr.test.prog_fd = test_attr->prog_fd;
+ attr.test.data_in = ptr_to_u64(test_attr->data_in);
+ attr.test.data_out = ptr_to_u64(test_attr->data_out);
+ attr.test.data_size_in = test_attr->data_size_in;
+ attr.test.data_size_out = test_attr->data_size_out;
+ attr.test.repeat = test_attr->repeat;
+
+ ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+ test_attr->data_size_out = attr.test.data_size_out;
+ test_attr->retval = attr.test.retval;
+ test_attr->duration = attr.test.duration;
+ return ret;
+}
+
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
union bpf_attr attr;