Diffstat (limited to 'tools')
89 files changed, 12387 insertions, 8516 deletions
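Among the changes below, the tools/lib/bpf/bpf_helpers.h hunk moves the open-coded iterator helpers (bpf_for(), bpf_for_each(), bpf_repeat()) out of the selftests-only bpf_misc.h and into the public helper header. As a rough illustration of the bpf_for() construct described in that hunk's comment block, a minimal BPF program sketch might look like the following; the map name, section name, and array size are made up for illustration only, and it assumes a kernel that exposes the bpf_iter_num kfuncs:

/* Illustrative sketch only, not part of the patch below. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define NR_VALS 16

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, NR_VALS);
	__type(key, __u32);
	__type(value, __u64);
} vals SEC(".maps");		/* hypothetical map, for illustration */

SEC("raw_tp/sched_switch")	/* hypothetical attach point */
int sum_vals(void *ctx)
{
	__u64 sum = 0;
	int i;

	/* bpf_for(i, 0, NR_VALS) proves to the verifier that i stays in
	 * [0, NR_VALS), so no extra bounds check on i is needed here
	 * (the lookup itself may still return NULL and must be checked).
	 */
	bpf_for(i, 0, NR_VALS) {
		__u32 key = i;
		__u64 *v = bpf_map_lookup_elem(&vals, &key);

		if (v)
			sum += *v;
	}

	bpf_printk("sum = %llu", sum);
	return 0;
}

char _license[] SEC("license") = "GPL";

The point of the construct, as the header comment explains, is that the loop supports break/continue naturally and the verifier can track the value range of i, unlike a hand-rolled bounded loop.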
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst index ee53a122c0c7..8022b5321dbe 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst @@ -26,7 +26,7 @@ STRUCT_OPS COMMANDS | **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*] | **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*] -| **bpftool** **struct_ops register** *OBJ* +| **bpftool** **struct_ops register** *OBJ* [*LINK_DIR*] | **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP* | **bpftool** **struct_ops help** | @@ -51,10 +51,14 @@ DESCRIPTION for the given struct_ops. Otherwise, it dumps all struct_ops currently existing in the system. - **bpftool struct_ops register** *OBJ* + **bpftool struct_ops register** *OBJ* [*LINK_DIR*] Register bpf struct_ops from *OBJ*. All struct_ops under - the ELF section ".struct_ops" will be registered to - its kernel subsystem. + the ELF section ".struct_ops" and ".struct_ops.link" will + be registered to its kernel subsystem. For each + struct_ops in the ".struct_ops.link" section, a link + will be created. You can give *LINK_DIR* to provide a + directory path where these links will be pinned with the + same name as their corresponding map name. **bpftool struct_ops unregister** *STRUCT_OPS_MAP* Unregister the *STRUCT_OPS_MAP* from the kernel subsystem. diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 6c5e0e82da22..294de231db99 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -835,7 +835,7 @@ static void dotlabel_puts(const char *s) case '|': case ' ': putchar('\\'); - __fallthrough; + /* fallthrough */ default: putchar(*s); } diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 5a73ccf14332..1360c82ae732 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -1091,3 +1091,17 @@ const char *bpf_attach_type_input_str(enum bpf_attach_type t) default: return libbpf_bpf_attach_type_str(t); } } + +int pathname_concat(char *buf, int buf_sz, const char *path, + const char *name) +{ + int len; + + len = snprintf(buf, buf_sz, "%s/%s", path, name); + if (len < 0) + return -EINVAL; + if (len >= buf_sz) + return -ENAMETOOLONG; + + return 0; +} diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index f985b79cca27..d98dbc50cf4c 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -3,6 +3,8 @@ #include <errno.h> #include <linux/err.h> +#include <linux/netfilter.h> +#include <linux/netfilter_arp.h> #include <net/if.h> #include <stdio.h> #include <unistd.h> @@ -135,6 +137,18 @@ static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr) } } +void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr) +{ + jsonw_uint_field(json_wtr, "pf", + info->netfilter.pf); + jsonw_uint_field(json_wtr, "hook", + info->netfilter.hooknum); + jsonw_int_field(json_wtr, "prio", + info->netfilter.priority); + jsonw_uint_field(json_wtr, "flags", + info->netfilter.flags); +} + static int get_prog_info(int prog_id, struct bpf_prog_info *info) { __u32 len = sizeof(*info); @@ -195,6 +209,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) info->netns.netns_ino); show_link_attach_type_json(info->netns.attach_type, json_wtr); break; + case BPF_LINK_TYPE_NETFILTER: + netfilter_dump_json(info, json_wtr); + break; + default: break; } @@ -263,6 +281,68 @@ static void 
show_iter_plain(struct bpf_link_info *info) } } +static const char * const pf2name[] = { + [NFPROTO_INET] = "inet", + [NFPROTO_IPV4] = "ip", + [NFPROTO_ARP] = "arp", + [NFPROTO_NETDEV] = "netdev", + [NFPROTO_BRIDGE] = "bridge", + [NFPROTO_IPV6] = "ip6", +}; + +static const char * const inethook2name[] = { + [NF_INET_PRE_ROUTING] = "prerouting", + [NF_INET_LOCAL_IN] = "input", + [NF_INET_FORWARD] = "forward", + [NF_INET_LOCAL_OUT] = "output", + [NF_INET_POST_ROUTING] = "postrouting", +}; + +static const char * const arphook2name[] = { + [NF_ARP_IN] = "input", + [NF_ARP_OUT] = "output", +}; + +void netfilter_dump_plain(const struct bpf_link_info *info) +{ + const char *hookname = NULL, *pfname = NULL; + unsigned int hook = info->netfilter.hooknum; + unsigned int pf = info->netfilter.pf; + + if (pf < ARRAY_SIZE(pf2name)) + pfname = pf2name[pf]; + + switch (pf) { + case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */ + case NFPROTO_IPV4: + case NFPROTO_IPV6: + case NFPROTO_INET: + if (hook < ARRAY_SIZE(inethook2name)) + hookname = inethook2name[hook]; + break; + case NFPROTO_ARP: + if (hook < ARRAY_SIZE(arphook2name)) + hookname = arphook2name[hook]; + default: + break; + } + + if (pfname) + printf("\n\t%s", pfname); + else + printf("\n\tpf: %d", pf); + + if (hookname) + printf(" %s", hookname); + else + printf(", hook %u,", hook); + + printf(" prio %d", info->netfilter.priority); + + if (info->netfilter.flags) + printf(" flags 0x%x", info->netfilter.flags); +} + static int show_link_close_plain(int fd, struct bpf_link_info *info) { struct bpf_prog_info prog_info; @@ -301,6 +381,9 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) printf("\n\tnetns_ino %u ", info->netns.netns_ino); show_link_attach_type_plain(info->netns.attach_type); break; + case BPF_LINK_TYPE_NETFILTER: + netfilter_dump_plain(info); + break; default: break; } diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 00d11ca6d3f2..a49534d7eafa 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -264,4 +264,10 @@ static inline bool hashmap__empty(struct hashmap *map) return map ? 
hashmap__size(map) == 0 : true; } +int pathname_concat(char *buf, int buf_sz, const char *path, + const char *name); + +/* print netfilter bpf_link info */ +void netfilter_dump_plain(const struct bpf_link_info *info); +void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr); #endif diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c index c40e44c938ae..26a49965bf71 100644 --- a/tools/bpf/bpftool/net.c +++ b/tools/bpf/bpftool/net.c @@ -647,6 +647,108 @@ static int do_detach(int argc, char **argv) return 0; } +static int netfilter_link_compar(const void *a, const void *b) +{ + const struct bpf_link_info *nfa = a; + const struct bpf_link_info *nfb = b; + int delta; + + delta = nfa->netfilter.pf - nfb->netfilter.pf; + if (delta) + return delta; + + delta = nfa->netfilter.hooknum - nfb->netfilter.hooknum; + if (delta) + return delta; + + if (nfa->netfilter.priority < nfb->netfilter.priority) + return -1; + if (nfa->netfilter.priority > nfb->netfilter.priority) + return 1; + + return nfa->netfilter.flags - nfb->netfilter.flags; +} + +static void show_link_netfilter(void) +{ + unsigned int nf_link_len = 0, nf_link_count = 0; + struct bpf_link_info *nf_link_info = NULL; + __u32 id = 0; + + while (true) { + struct bpf_link_info info; + int fd, err; + __u32 len; + + err = bpf_link_get_next_id(id, &id); + if (err) { + if (errno == ENOENT) + break; + p_err("can't get next link: %s (id %d)", strerror(errno), id); + break; + } + + fd = bpf_link_get_fd_by_id(id); + if (fd < 0) { + p_err("can't get link by id (%u): %s", id, strerror(errno)); + continue; + } + + memset(&info, 0, sizeof(info)); + len = sizeof(info); + + err = bpf_link_get_info_by_fd(fd, &info, &len); + + close(fd); + + if (err) { + p_err("can't get link info for fd %d: %s", fd, strerror(errno)); + continue; + } + + if (info.type != BPF_LINK_TYPE_NETFILTER) + continue; + + if (nf_link_count >= nf_link_len) { + static const unsigned int max_link_count = INT_MAX / sizeof(info); + struct bpf_link_info *expand; + + if (nf_link_count > max_link_count) { + p_err("cannot handle more than %u links\n", max_link_count); + break; + } + + nf_link_len += 16; + + expand = realloc(nf_link_info, nf_link_len * sizeof(info)); + if (!expand) { + p_err("realloc: %s", strerror(errno)); + break; + } + + nf_link_info = expand; + } + + nf_link_info[nf_link_count] = info; + nf_link_count++; + } + + qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar); + + for (id = 0; id < nf_link_count; id++) { + NET_START_OBJECT; + if (json_output) + netfilter_dump_json(&nf_link_info[id], json_wtr); + else + netfilter_dump_plain(&nf_link_info[id]); + + NET_DUMP_UINT("id", " prog_id %u", nf_link_info[id].prog_id); + NET_END_OBJECT; + } + + free(nf_link_info); +} + static int do_show(int argc, char **argv) { struct bpf_attach_info attach_info = {}; @@ -701,6 +803,10 @@ static int do_show(int argc, char **argv) NET_DUMP_UINT("id", "id %u", attach_info.flow_dissector_id); NET_END_ARRAY("\n"); + NET_START_ARRAY("netfilter", "%s:\n"); + show_link_netfilter(); + NET_END_ARRAY("\n"); + NET_END_OBJECT; if (json_output) jsonw_end_array(json_wtr); diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index e5b613a7974c..91b6075b2db3 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1476,19 +1476,6 @@ auto_attach_program(struct bpf_program *prog, const char *path) return err; } -static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name) -{ - int len; - - len = 
snprintf(buf, buf_sz, "%s/%s", path, name); - if (len < 0) - return -EINVAL; - if ((size_t)len >= buf_sz) - return -ENAMETOOLONG; - - return 0; -} - static int auto_attach_programs(struct bpf_object *obj, const char *path) { diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index b389f4830e11..57c3da70aa31 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -475,21 +475,44 @@ static int do_unregister(int argc, char **argv) return cmd_retval(&res, true); } +static int pin_link(struct bpf_link *link, const char *pindir, + const char *name) +{ + char pinfile[PATH_MAX]; + int err; + + err = pathname_concat(pinfile, sizeof(pinfile), pindir, name); + if (err) + return -1; + + return bpf_link__pin(link, pinfile); +} + static int do_register(int argc, char **argv) { LIBBPF_OPTS(bpf_object_open_opts, open_opts); + __u32 link_info_len = sizeof(struct bpf_link_info); + struct bpf_link_info link_info = {}; struct bpf_map_info info = {}; __u32 info_len = sizeof(info); int nr_errs = 0, nr_maps = 0; + const char *linkdir = NULL; struct bpf_object *obj; struct bpf_link *link; struct bpf_map *map; const char *file; - if (argc != 1) + if (argc != 1 && argc != 2) usage(); file = GET_ARG(); + if (argc == 1) + linkdir = GET_ARG(); + + if (linkdir && mount_bpffs_for_pin(linkdir)) { + p_err("can't mount bpffs for pinning"); + return -1; + } if (verifier_logs) /* log_level1 + log_level2 + stats, but not stable UAPI */ @@ -519,21 +542,44 @@ static int do_register(int argc, char **argv) } nr_maps++; - bpf_link__disconnect(link); - bpf_link__destroy(link); - - if (!bpf_map_get_info_by_fd(bpf_map__fd(map), &info, - &info_len)) - p_info("Registered %s %s id %u", - get_kern_struct_ops_name(&info), - bpf_map__name(map), - info.id); - else + if (bpf_map_get_info_by_fd(bpf_map__fd(map), &info, + &info_len)) { /* Not p_err. The struct_ops was attached * successfully. 
*/ p_info("Registered %s but can't find id: %s", bpf_map__name(map), strerror(errno)); + goto clean_link; + } + if (!(bpf_map__map_flags(map) & BPF_F_LINK)) { + p_info("Registered %s %s id %u", + get_kern_struct_ops_name(&info), + info.name, + info.id); + goto clean_link; + } + if (bpf_link_get_info_by_fd(bpf_link__fd(link), + &link_info, + &link_info_len)) { + p_err("Registered %s but can't find link id: %s", + bpf_map__name(map), strerror(errno)); + nr_errs++; + goto clean_link; + } + if (linkdir && pin_link(link, linkdir, info.name)) { + p_err("can't pin link %u for %s: %s", + link_info.id, info.name, + strerror(errno)); + nr_errs++; + goto clean_link; + } + p_info("Registered %s %s map id %u link id %u", + get_kern_struct_ops_name(&info), + info.name, info.id, link_info.id); + +clean_link: + bpf_link__disconnect(link); + bpf_link__destroy(link); } bpf_object__close(obj); @@ -562,7 +608,7 @@ static int do_help(int argc, char **argv) fprintf(stderr, "Usage: %1$s %2$s { show | list } [STRUCT_OPS_MAP]\n" " %1$s %2$s dump [STRUCT_OPS_MAP]\n" - " %1$s %2$s register OBJ\n" + " %1$s %2$s register OBJ [LINK_DIR]\n" " %1$s %2$s unregister STRUCT_OPS_MAP\n" " %1$s %2$s help\n" "\n" diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3823100b7934..1bb11a6ee667 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -986,6 +986,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ + BPF_PROG_TYPE_NETFILTER, }; enum bpf_attach_type { @@ -1050,6 +1051,7 @@ enum bpf_link_type { BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, MAX_BPF_LINK_TYPE, }; @@ -1560,6 +1562,12 @@ union bpf_attr { */ __u64 cookie; } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; }; } link_create; @@ -6410,6 +6418,12 @@ struct bpf_link_info { struct { __u32 map_id; } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; }; } __attribute__((aligned(8))); @@ -6985,6 +6999,10 @@ struct bpf_rb_node { __u64 :64; } __attribute__((aligned(8))); +struct bpf_refcount { + __u32 :32; +} __attribute__((aligned(4))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index e7e1a8acc299..929a3baca8ef 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -291,4 +291,107 @@ enum libbpf_tristate { /* Helper macro to print out debug messages */ #define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args) +struct bpf_iter_num; + +extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym; +extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym; +extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym; + +#ifndef bpf_for_each +/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for + * using BPF open-coded iterators without having to write mundane explicit + * low-level loop logic. Instead, it provides for()-like generic construct + * that can be used pretty naturally. 
E.g., for some hypothetical cgroup + * iterator, you'd write: + * + * struct cgroup *cg, *parent_cg = <...>; + * + * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { + * bpf_printk("Child cgroup id = %d", cg->cgroup_id); + * if (cg->cgroup_id == 123) + * break; + * } + * + * I.e., it looks almost like high-level for each loop in other languages, + * supports continue/break, and is verifiable by BPF verifier. + * + * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) + * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to + * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int + * *`, not just `int`. So for integers bpf_for() is more convenient. + * + * Note: this macro relies on C99 feature of allowing to declare variables + * inside for() loop, bound to for() loop lifetime. It also utilizes GCC + * extension: __attribute__((cleanup(<func>))), supported by both GCC and + * Clang. + */ +#define bpf_for_each(type, cur, args...) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ + cleanup(bpf_iter_##type##_destroy))), \ + /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_##type##_new(&___it, ##args), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_##type##_destroy, (void *)0); \ + /* iteration and termination check */ \ + (((cur) = bpf_iter_##type##_next(&___it))); \ +) +#endif /* bpf_for_each */ + +#ifndef bpf_for +/* bpf_for(i, start, end) implements a for()-like looping construct that sets + * provided integer variable *i* to values starting from *start* through, + * but not including, *end*. It also proves to BPF verifier that *i* belongs + * to range [start, end), so this can be used for accessing arrays without + * extra checks. + * + * Note: *start* and *end* are assumed to be expressions with no side effects + * and whose values do not change throughout bpf_for() loop execution. They do + * not have to be statically known or constant, though. + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. + */ +#define bpf_for(i, start, end) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, (start), (end)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + ({ \ + /* iteration step */ \ + int *___t = bpf_iter_num_next(&___it); \ + /* termination and bounds check */ \ + (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ + }); \ +) +#endif /* bpf_for */ + +#ifndef bpf_repeat +/* bpf_repeat(N) performs N iterations without exposing iteration number + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. 
+ */ +#define bpf_repeat(N) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, 0, (N)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + bpf_iter_num_next(&___it); \ + /* nothing here */ \ +) +#endif /* bpf_repeat */ + #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 49cd304ae3bc..1cbacf9e71f3 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -130,6 +130,7 @@ static const char * const link_type_name[] = { [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", + [BPF_LINK_TYPE_NETFILTER] = "netfilter", }; static const char * const map_type_name[] = { @@ -201,6 +202,7 @@ static const char * const prog_type_name[] = { [BPF_PROG_TYPE_LSM] = "lsm", [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", [BPF_PROG_TYPE_SYSCALL] = "syscall", + [BPF_PROG_TYPE_NETFILTER] = "netfilter", }; static int __base_pr(enum libbpf_print_level level, const char *format, @@ -333,6 +335,7 @@ struct reloc_desc { struct { int map_idx; int sym_off; + int ext_idx; }; }; }; @@ -4042,7 +4045,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog, else reloc_desc->type = RELO_EXTERN_LD64; reloc_desc->insn_idx = insn_idx; - reloc_desc->sym_off = i; /* sym_off stores extern index */ + reloc_desc->ext_idx = i; return 0; } @@ -5811,8 +5814,8 @@ out: } /* base map load ldimm64 special constant, used also for log fixup logic */ -#define MAP_LDIMM64_POISON_BASE 2001000000 -#define MAP_LDIMM64_POISON_PFX "200100" +#define POISON_LDIMM64_MAP_BASE 2001000000 +#define POISON_LDIMM64_MAP_PFX "200100" static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, int insn_idx, struct bpf_insn *insn, @@ -5834,12 +5837,36 @@ static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, * invalid func unknown#2001000123 * where lower 123 is map index into obj->maps[] array */ - insn->imm = MAP_LDIMM64_POISON_BASE + map_idx; + insn->imm = POISON_LDIMM64_MAP_BASE + map_idx; insn++; } } +/* unresolved kfunc call special constant, used also for log fixup logic */ +#define POISON_CALL_KFUNC_BASE 2002000000 +#define POISON_CALL_KFUNC_PFX "2002" + +static void poison_kfunc_call(struct bpf_program *prog, int relo_idx, + int insn_idx, struct bpf_insn *insn, + int ext_idx, const struct extern_desc *ext) +{ + pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n", + prog->name, relo_idx, insn_idx, ext->name); + + /* we turn kfunc call into invalid helper call with identifiable constant */ + insn->code = BPF_JMP | BPF_CALL; + insn->dst_reg = 0; + insn->src_reg = 0; + insn->off = 0; + /* if this instruction is reachable (not a dead code), + * verifier will complain with something like: + * invalid func unknown#2001000123 + * where lower 123 is extern index into obj->externs[] array + */ + insn->imm = POISON_CALL_KFUNC_BASE + ext_idx; +} + /* Relocate data references within program code: * - map references; * - global variable references; @@ -5885,7 +5912,7 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) } break; case RELO_EXTERN_LD64: - ext = 
&obj->externs[relo->sym_off]; + ext = &obj->externs[relo->ext_idx]; if (ext->type == EXT_KCFG) { if (obj->gen_loader) { insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; @@ -5907,14 +5934,14 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) } break; case RELO_EXTERN_CALL: - ext = &obj->externs[relo->sym_off]; + ext = &obj->externs[relo->ext_idx]; insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; if (ext->is_set) { insn[0].imm = ext->ksym.kernel_btf_id; insn[0].off = ext->ksym.btf_fd_idx; - } else { /* unresolved weak kfunc */ - insn[0].imm = 0; - insn[0].off = 0; + } else { /* unresolved weak kfunc call */ + poison_kfunc_call(prog, i, relo->insn_idx, insn, + relo->ext_idx, ext); } break; case RELO_SUBPROG_ADDR: @@ -7022,13 +7049,13 @@ static void fixup_log_missing_map_load(struct bpf_program *prog, char *buf, size_t buf_sz, size_t log_sz, char *line1, char *line2, char *line3) { - /* Expected log for failed and not properly guarded CO-RE relocation: + /* Expected log for failed and not properly guarded map reference: * line1 -> 123: (85) call unknown#2001000345 * line2 -> invalid func unknown#2001000345 * line3 -> <anything else or end of buffer> * * "123" is the index of the instruction that was poisoned. - * "345" in "2001000345" are map index in obj->maps to fetch map name. + * "345" in "2001000345" is a map index in obj->maps to fetch map name. */ struct bpf_object *obj = prog->obj; const struct bpf_map *map; @@ -7038,7 +7065,7 @@ static void fixup_log_missing_map_load(struct bpf_program *prog, if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) return; - map_idx -= MAP_LDIMM64_POISON_BASE; + map_idx -= POISON_LDIMM64_MAP_BASE; if (map_idx < 0 || map_idx >= obj->nr_maps) return; map = &obj->maps[map_idx]; @@ -7051,6 +7078,39 @@ static void fixup_log_missing_map_load(struct bpf_program *prog, patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); } +static void fixup_log_missing_kfunc_call(struct bpf_program *prog, + char *buf, size_t buf_sz, size_t log_sz, + char *line1, char *line2, char *line3) +{ + /* Expected log for failed and not properly guarded kfunc call: + * line1 -> 123: (85) call unknown#2002000345 + * line2 -> invalid func unknown#2002000345 + * line3 -> <anything else or end of buffer> + * + * "123" is the index of the instruction that was poisoned. + * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name. 
+ */ + struct bpf_object *obj = prog->obj; + const struct extern_desc *ext; + int insn_idx, ext_idx; + char patch[128]; + + if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) + return; + + ext_idx -= POISON_CALL_KFUNC_BASE; + if (ext_idx < 0 || ext_idx >= obj->nr_extern) + return; + ext = &obj->externs[ext_idx]; + + snprintf(patch, sizeof(patch), + "%d: <invalid kfunc call>\n" + "kfunc '%s' is referenced but wasn't resolved\n", + insn_idx, ext->name); + + patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); +} + static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) { /* look for familiar error patterns in last N lines of the log */ @@ -7070,23 +7130,33 @@ static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_s if (!cur_line) return; - /* failed CO-RE relocation case */ if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; + /* failed CO-RE relocation case */ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; - } else if (str_has_pfx(cur_line, "invalid func unknown#"MAP_LDIMM64_POISON_PFX)) { + } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; + /* reference to uncreated BPF map */ fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; + } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { + prev_line = find_prev_line(buf, cur_line); + if (!prev_line) + continue; + + /* reference to unresolved kfunc */ + fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, + prev_line, cur_line, next_line); + return; } } } @@ -7098,7 +7168,7 @@ static int bpf_program_record_relos(struct bpf_program *prog) for (i = 0; i < prog->nr_reloc; i++) { struct reloc_desc *relo = &prog->reloc_desc[i]; - struct extern_desc *ext = &obj->externs[relo->sym_off]; + struct extern_desc *ext = &obj->externs[relo->ext_idx]; int kind; switch (relo->type) { @@ -7536,8 +7606,9 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, kern_btf, kfunc_proto_id); if (ret <= 0) { - pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n", - ext->name, local_func_proto_id, kfunc_proto_id); + pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", + ext->name, local_func_proto_id, + mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id); return -EINVAL; } @@ -7571,8 +7642,8 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. */ ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; - pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n", - ext->name, kfunc_id); + pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", + ext->name, mod_btf ? 
mod_btf->name : "vmlinux", kfunc_id); return 0; } @@ -8641,6 +8712,7 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), + SEC_DEF("netfilter", NETFILTER, 0, SEC_NONE), }; static size_t custom_sec_def_cnt; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 4f3bc968ff8e..6065f408a59c 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -180,6 +180,7 @@ static int probe_prog_load(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SYSCTL: + case BPF_PROG_TYPE_NETFILTER: break; default: return -EOPNOTSUPP; diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index dbd2c729781a..209811b1993a 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -14,7 +14,8 @@ * type ID of a struct in program BTF. * * The 'local_type_id' parameter must be a known constant. - * The 'meta' parameter is a hidden argument that is ignored. + * The 'meta' parameter is rewritten by the verifier, no need for BPF + * program to set it. * Returns * A pointer to an object of the type corresponding to the passed in * 'local_type_id', or NULL on failure. @@ -28,7 +29,8 @@ extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; * Free an allocated object. All fields of the object that require * destruction will be destructed before the storage is freed. * - * The 'meta' parameter is a hidden argument that is ignored. + * The 'meta' parameter is rewritten by the verifier, no need for BPF + * program to set it. * Returns * Void. */ @@ -38,18 +40,50 @@ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) /* Description + * Increment the refcount on a refcounted local kptr, turning the + * non-owning reference input into an owning reference in the process. + * + * The 'meta' parameter is rewritten by the verifier, no need for BPF + * program to set it. + * Returns + * An owning reference to the object pointed to by 'kptr' + */ +extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; + +/* Convenience macro to wrap over bpf_refcount_acquire_impl */ +#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL) + +/* Description * Add a new entry to the beginning of the BPF linked list. + * + * The 'meta' and 'off' parameters are rewritten by the verifier, no need + * for BPF programs to set them * Returns - * Void. + * 0 if the node was successfully added + * -EINVAL if the node wasn't added because it's already in a list */ -extern void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym; +extern int bpf_list_push_front_impl(struct bpf_list_head *head, + struct bpf_list_node *node, + void *meta, __u64 off) __ksym; + +/* Convenience macro to wrap over bpf_list_push_front_impl */ +#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0) /* Description * Add a new entry to the end of the BPF linked list. + * + * The 'meta' and 'off' parameters are rewritten by the verifier, no need + * for BPF programs to set them * Returns - * Void. 
+ * 0 if the node was successfully added + * -EINVAL if the node wasn't added because it's already in a list */ -extern void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym; +extern int bpf_list_push_back_impl(struct bpf_list_head *head, + struct bpf_list_node *node, + void *meta, __u64 off) __ksym; + +/* Convenience macro to wrap over bpf_list_push_back_impl */ +#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0) /* Description * Remove the entry at the beginning of the BPF linked list. @@ -75,11 +109,19 @@ extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, /* Description * Add 'node' to rbtree with root 'root' using comparator 'less' + * + * The 'meta' and 'off' parameters are rewritten by the verifier, no need + * for BPF programs to set them * Returns - * Nothing + * 0 if the node was successfully added + * -EINVAL if the node wasn't added because it's already in a tree */ -extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym; +extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), + void *meta, __u64 off) __ksym; + +/* Convenience macro to wrap over bpf_rbtree_add_impl */ +#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0) /* Description * Return the first (leftmost) node in input tree diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index fe847ebfb731..52785ba671e6 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -28,6 +28,11 @@ struct bpf_testmod_struct_arg_2 { long b; }; +struct bpf_testmod_struct_arg_3 { + int a; + int b[]; +}; + __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in bpf_testmod.ko BTF"); @@ -63,6 +68,12 @@ bpf_testmod_test_struct_arg_5(void) { return bpf_testmod_test_struct_arg_result; } +noinline int +bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) { + bpf_testmod_test_struct_arg_result = a->b[0]; + return bpf_testmod_test_struct_arg_result; +} + __bpf_kfunc void bpf_testmod_test_mod_kfunc(int i) { @@ -195,6 +206,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, }; struct bpf_testmod_struct_arg_1 struct_arg1 = {10}; struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; + struct bpf_testmod_struct_arg_3 *struct_arg3; int i = 1; while (bpf_testmod_return_ptr(i)) @@ -206,6 +218,14 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2); (void)bpf_testmod_test_struct_arg_5(); + struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + + sizeof(int)), GFP_KERNEL); + if (struct_arg3 != NULL) { + struct_arg3->b[0] = 1; + (void)bpf_testmod_test_struct_arg_6(struct_arg3); + kfree(struct_arg3); + } + /* This is always true. Use the check to make sure the compiler * doesn't remove bpf_testmod_loop_test. 
*/ diff --git a/tools/testing/selftests/bpf/prog_tests/access_variable_array.c b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c new file mode 100644 index 000000000000..08131782437c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Bytedance */ + +#include <test_progs.h> +#include "test_access_variable_array.skel.h" + +void test_access_variable_array(void) +{ + struct test_access_variable_array *skel; + + skel = test_access_variable_array__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_access_variable_array__open_and_load")) + return; + + test_access_variable_array__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 0ed8132ce1c3..f63309fd0e28 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -84,11 +84,11 @@ static struct { { "double_push_back", "arg#1 expected pointer to allocated object" }, { "no_node_value_type", "bpf_list_node not found at offset=0" }, { "incorrect_value_type", - "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, " + "operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, " "but arg is at offset=0 in struct bar" }, { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, - { "incorrect_node_off1", "bpf_list_node not found at offset=1" }, - { "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" }, + { "incorrect_node_off1", "bpf_list_node not found at offset=41" }, + { "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" }, { "no_head_type", "bpf_list_head not found at offset=0" }, { "incorrect_head_var_off1", "R1 doesn't have constant offset" }, { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, @@ -266,6 +266,59 @@ end: return NULL; } +static void list_and_rb_node_same_struct(bool refcount_field) +{ + int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id; + struct btf *btf; + int id, err; + + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + return; + + bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24); + if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node")) + return; + + if (refcount_field) { + bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4); + if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount")) + return; + } + + id = btf__add_struct(btf, "bar", refcount_field ? 
44 : 40); + if (!ASSERT_GT(id, 0, "btf__add_struct bar")) + return; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + return; + err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + return; + if (refcount_field) { + err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0); + if (!ASSERT_OK(err, "btf__add_field bar::ref")) + return; + } + + foo_btf_id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo")) + return; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + return; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + return; + id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0); + if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a")) + return; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf"); + btf__free(btf); +} + static void test_btf(void) { struct btf *btf = NULL; @@ -717,39 +770,12 @@ static void test_btf(void) } while (test__start_subtest("btf: list_node and rb_node in same struct")) { - btf = init_btf(); - if (!ASSERT_OK_PTR(btf, "init_btf")) - break; - - id = btf__add_struct(btf, "bpf_rb_node", 24); - if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node")) - break; - id = btf__add_struct(btf, "bar", 40); - if (!ASSERT_EQ(id, 6, "btf__add_struct bar")) - break; - err = btf__add_field(btf, "a", LIST_NODE, 0, 0); - if (!ASSERT_OK(err, "btf__add_field bar::a")) - break; - err = btf__add_field(btf, "c", 5, 128, 0); - if (!ASSERT_OK(err, "btf__add_field bar::c")) - break; - - id = btf__add_struct(btf, "foo", 20); - if (!ASSERT_EQ(id, 7, "btf__add_struct foo")) - break; - err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); - if (!ASSERT_OK(err, "btf__add_field foo::a")) - break; - err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); - if (!ASSERT_OK(err, "btf__add_field foo::b")) - break; - id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0); - if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a")) - break; + list_and_rb_node_same_struct(true); + break; + } - err = btf__load_into_kernel(btf); - ASSERT_EQ(err, -EINVAL, "check btf"); - btf__free(btf); + while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) { + list_and_rb_node_same_struct(false); break; } } diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index bc27170bdeb0..dba71d98a227 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -135,6 +135,35 @@ cleanup: test_log_fixup__destroy(skel); } +static void missing_kfunc(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.use_missing_kfunc, true); + bpf_program__set_log_buf(skel->progs.use_missing_kfunc, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + "0: <invalid kfunc call>\n" + "kfunc 'bpf_nonexistent_kfunc' is referenced but wasn't resolved\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + void test_log_fixup(void) { if 
(test__start_subtest("bad_core_relo_trunc_none")) @@ -147,4 +176,6 @@ void test_log_fixup(void) bad_core_relo_subprog(); if (test__start_subtest("missing_map")) missing_map(); + if (test__start_subtest("missing_kfunc")) + missing_kfunc(); } diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c index 156fa95c42f6..e9300c96607d 100644 --- a/tools/testing/selftests/bpf/prog_tests/rbtree.c +++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c @@ -77,6 +77,29 @@ static void test_rbtree_first_and_remove(void) rbtree__destroy(skel); } +static void test_rbtree_api_release_aliasing(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts); + ASSERT_OK(ret, "rbtree_api_release_aliasing"); + ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval"); + ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()"); + ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()"); + + rbtree__destroy(skel); +} + void test_rbtree_success(void) { if (test__start_subtest("rbtree_add_nodes")) @@ -85,6 +108,8 @@ void test_rbtree_success(void) test_rbtree_add_and_remove(); if (test__start_subtest("rbtree_first_and_remove")) test_rbtree_first_and_remove(); + if (test__start_subtest("rbtree_api_release_aliasing")) + test_rbtree_api_release_aliasing(); } #define BTF_FAIL_TEST(suffix) \ diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c new file mode 100644 index 000000000000..2ab23832062d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <test_progs.h> +#include <network_helpers.h> + +#include "refcounted_kptr.skel.h" +#include "refcounted_kptr_fail.skel.h" + +void test_refcounted_kptr(void) +{ + RUN_TESTS(refcounted_kptr); +} + +void test_refcounted_kptr_fail(void) +{ + RUN_TESTS(refcounted_kptr_fail); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 8f09e1ea3ba7..141c1e5944ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -20,6 +20,11 @@ #include <unistd.h> #include <linux/vm_sockets.h> +/* workaround for older vm_sockets.h */ +#ifndef VMADDR_CID_LOCAL +#define VMADDR_CID_LOCAL 1 +#endif + #include <bpf/bpf.h> #include <bpf/libbpf.h> diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 60d952719d27..4512dd808c33 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -3,6 +3,7 @@ #include "cgroup_helpers.h" #include <linux/tcp.h> +#include <linux/netlink.h> #include "sockopt_sk.skel.h" #ifndef SOL_TCP @@ -183,6 +184,33 @@ static int getsetsockopt(void) goto err; } + /* optval=NULL case is handled correctly */ + + close(fd); + fd = socket(AF_NETLINK, SOCK_RAW, 0); + if (fd < 0) { + log_err("Failed to create AF_NETLINK socket"); + return -1; + } + + buf.u32 = 1; + optlen = sizeof(__u32); + err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d", + err, errno); + goto err; + } + + optlen = 0; + err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d", + err, errno); + goto err; + } + ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); + free(big_buf); close(fd); return 0; diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index 48dc9472e160..1c75a32186d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -53,6 +53,8 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret"); + ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); + tracing_struct__detach(skel); destroy_skel: tracing_struct__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 73dff693d411..c8bab8b1a6a4 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -6,15 +6,21 @@ #include "verifier_and.skel.h" #include "verifier_array_access.skel.h" #include "verifier_basic_stack.skel.h" +#include "verifier_bounds.skel.h" #include "verifier_bounds_deduction.skel.h" #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" +#include "verifier_bpf_get_stack.skel.h" +#include "verifier_btf_ctx_access.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" #include "verifier_cgroup_skb.skel.h" #include "verifier_cgroup_storage.skel.h" #include "verifier_const_or.skel.h" +#include "verifier_ctx.skel.h" #include "verifier_ctx_sk_msg.skel.h" +#include "verifier_d_path.skel.h" +#include "verifier_direct_packet_access.skel.h" #include 
"verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" @@ -23,28 +29,56 @@ #include "verifier_helper_restricted.skel.h" #include "verifier_helper_value_access.skel.h" #include "verifier_int_ptr.skel.h" +#include "verifier_jeq_infer_not_null.skel.h" #include "verifier_ld_ind.skel.h" #include "verifier_leak_ptr.skel.h" +#include "verifier_loops1.skel.h" +#include "verifier_lwt.skel.h" +#include "verifier_map_in_map.skel.h" #include "verifier_map_ptr.skel.h" +#include "verifier_map_ptr_mixing.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" +#include "verifier_netfilter_ctx.skel.h" +#include "verifier_netfilter_retcode.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" +#include "verifier_reg_equal.skel.h" +#include "verifier_ref_tracking.skel.h" +#include "verifier_regalloc.skel.h" #include "verifier_ringbuf.skel.h" +#include "verifier_runtime_jit.skel.h" +#include "verifier_search_pruning.skel.h" +#include "verifier_sock.skel.h" #include "verifier_spill_fill.skel.h" +#include "verifier_spin_lock.skel.h" #include "verifier_stack_ptr.skel.h" +#include "verifier_subreg.skel.h" #include "verifier_uninit.skel.h" +#include "verifier_unpriv.skel.h" +#include "verifier_unpriv_perf.skel.h" #include "verifier_value_adj_spill.skel.h" #include "verifier_value.skel.h" +#include "verifier_value_illegal_alu.skel.h" #include "verifier_value_or_null.skel.h" +#include "verifier_value_ptr_arith.skel.h" #include "verifier_var_off.skel.h" #include "verifier_xadd.skel.h" #include "verifier_xdp.skel.h" #include "verifier_xdp_direct_packet_access.skel.h" +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + __maybe_unused -static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) +static void run_tests_aux(const char *skel_name, + skel_elf_bytes_fn elf_bytes_factory, + pre_execution_cb pre_execution_cb) { struct test_loader tester = {}; __u64 old_caps; @@ -57,6 +91,7 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac return; } + test_loader__set_pre_execution_cb(&tester, pre_execution_cb); test_loader__run_subtests(&tester, skel_name, elf_bytes_factory); test_loader_fini(&tester); @@ -65,20 +100,25 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err)); } -#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes) +#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL) void test_verifier_and(void) { RUN(verifier_and); } -void test_verifier_array_access(void) { RUN(verifier_array_access); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } +void test_verifier_bounds(void) { RUN(verifier_bounds); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } +void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } +void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void 
test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const_or(void) { RUN(verifier_const_or); } +void test_verifier_ctx(void) { RUN(verifier_ctx); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } +void test_verifier_d_path(void) { RUN(verifier_d_path); } +void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_access); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } @@ -87,22 +127,88 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } +void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } +void test_verifier_loops1(void) { RUN(verifier_loops1); } +void test_verifier_lwt(void) { RUN(verifier_lwt); } +void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } +void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } +void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } +void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } +void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } +void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } +void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } +void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } +void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } +void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } +void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } +void test_verifier_subreg(void) { RUN(verifier_subreg); } void test_verifier_uninit(void) { RUN(verifier_uninit); } +void test_verifier_unpriv(void) { RUN(verifier_unpriv); } +void test_verifier_unpriv_perf(void) { RUN(verifier_unpriv_perf); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } +void test_verifier_value_illegal_alu(void) { RUN(verifier_value_illegal_alu); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } void test_verifier_var_off(void) { RUN(verifier_var_off); } void test_verifier_xadd(void) { RUN(verifier_xadd); } void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } + +static int init_test_val_map(struct bpf_object *obj, char *map_name) +{ + struct test_val value = { + .index = (6 + 1) * sizeof(int), + .foo[6] = 
0xabcdef12, + }; + struct bpf_map *map; + int err, key = 0; + + map = bpf_object__find_map_by_name(obj, map_name); + if (!map) { + PRINT_FAIL("Can't find map '%s'\n", map_name); + return -EINVAL; + } + + err = bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0); + if (err) { + PRINT_FAIL("Error while updating map '%s': %d\n", map_name, err); + return err; + } + + return 0; +} + +static int init_array_access_maps(struct bpf_object *obj) +{ + return init_test_val_map(obj, "map_array_ro"); +} + +void test_verifier_array_access(void) +{ + run_tests_aux("verifier_array_access", + verifier_array_access__elf_bytes, + init_array_access_maps); +} + +static int init_value_ptr_arith_maps(struct bpf_object *obj) +{ + return init_test_val_map(obj, "map_array_48b"); +} + +void test_verifier_value_ptr_arith(void) +{ + run_tests_aux("verifier_value_ptr_arith", + verifier_value_ptr_arith__elf_bytes, + init_value_ptr_arith_maps); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index c94eb63b7b77..498d3bdaa4b0 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -175,8 +175,8 @@ void test_xdp_do_redirect(void) goto out; /* Enable GRO */ - SYS("ethtool -K veth_src gro on"); - SYS("ethtool -K veth_dst gro on"); + SYS(out, "ethtool -K veth_src gro on"); + SYS(out, "ethtool -K veth_dst gro on"); err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts); if (!ASSERT_OK(err, "veth_src bpf_xdp_query gro on")) diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 6e3b4903c541..d3c1217ba79a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -53,6 +53,10 @@ * - A numeric value. * Multiple __flag attributes could be specified, the final flags * value is derived by applying binary "or" to all specified values. + * + * __auxiliary Annotated program is not a separate test, but used as auxiliary + * for some other test cases and should always be loaded. + * __auxiliary_unpriv Same, but load program in unprivileged mode. */ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) @@ -65,6 +69,8 @@ #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) #define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val))) #define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val))) +#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) +#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) @@ -121,107 +127,4 @@ /* make it look to compiler like value is read and written */ #define __sink(expr) asm volatile("" : "+g"(expr)) -struct bpf_iter_num; - -extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; -extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym; -extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; - -#ifndef bpf_for_each -/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for - * using BPF open-coded iterators without having to write mundane explicit - * low-level loop logic. 
Instead, it provides for()-like generic construct - * that can be used pretty naturally. E.g., for some hypothetical cgroup - * iterator, you'd write: - * - * struct cgroup *cg, *parent_cg = <...>; - * - * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { - * bpf_printk("Child cgroup id = %d", cg->cgroup_id); - * if (cg->cgroup_id == 123) - * break; - * } - * - * I.e., it looks almost like high-level for each loop in other languages, - * supports continue/break, and is verifiable by BPF verifier. - * - * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) - * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to - * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int - * *`, not just `int`. So for integers bpf_for() is more convenient. - * - * Note: this macro relies on C99 feature of allowing to declare variables - * inside for() loop, bound to for() loop lifetime. It also utilizes GCC - * extension: __attribute__((cleanup(<func>))), supported by both GCC and - * Clang. - */ -#define bpf_for_each(type, cur, args...) for ( \ - /* initialize and define destructor */ \ - struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ - cleanup(bpf_iter_##type##_destroy))), \ - /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ - *___p __attribute__((unused)) = ( \ - bpf_iter_##type##_new(&___it, ##args), \ - /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ - /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ - (void)bpf_iter_##type##_destroy, (void *)0); \ - /* iteration and termination check */ \ - (((cur) = bpf_iter_##type##_next(&___it))); \ -) -#endif /* bpf_for_each */ - -#ifndef bpf_for -/* bpf_for(i, start, end) implements a for()-like looping construct that sets - * provided integer variable *i* to values starting from *start* through, - * but not including, *end*. It also proves to BPF verifier that *i* belongs - * to range [start, end), so this can be used for accessing arrays without - * extra checks. - * - * Note: *start* and *end* are assumed to be expressions with no side effects - * and whose values do not change throughout bpf_for() loop execution. They do - * not have to be statically known or constant, though. - * - * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() - * loop bound variables and cleanup attribute, supported by GCC and Clang. - */ -#define bpf_for(i, start, end) for ( \ - /* initialize and define destructor */ \ - struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ - cleanup(bpf_iter_num_destroy))), \ - /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ - *___p __attribute__((unused)) = ( \ - bpf_iter_num_new(&___it, (start), (end)), \ - /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ - /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ - (void)bpf_iter_num_destroy, (void *)0); \ - ({ \ - /* iteration step */ \ - int *___t = bpf_iter_num_next(&___it); \ - /* termination and bounds check */ \ - (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ - }); \ -) -#endif /* bpf_for */ - -#ifndef bpf_repeat -/* bpf_repeat(N) performs N iterations without exposing iteration number - * - * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() - * loop bound variables and cleanup attribute, supported by GCC and Clang. 
- */ -#define bpf_repeat(N) for ( \ - /* initialize and define destructor */ \ - struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ - cleanup(bpf_iter_num_destroy))), \ - /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ - *___p __attribute__((unused)) = ( \ - bpf_iter_num_new(&___it, 0, (N)), \ - /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ - /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ - (void)bpf_iter_num_destroy, (void *)0); \ - bpf_iter_num_next(&___it); \ - /* nothing here */ \ -) -#endif /* bpf_repeat */ - #endif diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index 53ded51a3abb..57440a554304 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -25,7 +25,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); bpf_obj_drop(f); return 3; } @@ -34,7 +34,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); bpf_obj_drop(f); return 4; } @@ -42,7 +42,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_lock(lock); f->data = 42; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); if (leave_in_map) return 0; @@ -51,7 +51,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_unlock(lock); if (!n) return 5; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 42) { bpf_obj_drop(f); return 6; @@ -59,14 +59,14 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_lock(lock); f->data = 13; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); bpf_spin_lock(lock); n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (!n) return 7; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 13) { bpf_obj_drop(f); return 8; @@ -77,7 +77,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 9; } @@ -85,7 +85,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 10; } return 0; @@ -119,8 +119,8 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea f[i + 1]->data = i + 1; bpf_spin_lock(lock); - bpf_list_push_front(head, &f[i]->node); - bpf_list_push_front(head, &f[i + 1]->node); + bpf_list_push_front(head, &f[i]->node2); + bpf_list_push_front(head, &f[i + 1]->node2); bpf_spin_unlock(lock); } @@ -130,13 +130,13 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea bpf_spin_unlock(lock); if (!n) return 3; - pf = 
container_of(n, struct foo, node); + pf = container_of(n, struct foo, node2); if (pf->data != (ARRAY_SIZE(f) - i - 1)) { bpf_obj_drop(pf); return 4; } bpf_spin_lock(lock); - bpf_list_push_back(head, &pf->node); + bpf_list_push_back(head, &pf->node2); bpf_spin_unlock(lock); } @@ -149,7 +149,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea bpf_spin_unlock(lock); if (!n) return 5; - pf = container_of(n, struct foo, node); + pf = container_of(n, struct foo, node2); if (pf->data != i) { bpf_obj_drop(pf); return 6; @@ -160,7 +160,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 7; } @@ -168,7 +168,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 8; } return 0; @@ -199,7 +199,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le bpf_spin_lock(lock); f->data = 42; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); if (leave_in_map) @@ -210,7 +210,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le bpf_spin_unlock(lock); if (!n) return 4; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 42) { bpf_obj_drop(f); return 5; diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h index 3fb2412552fc..c0f3609a7ffa 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.h +++ b/tools/testing/selftests/bpf/progs/linked_list.h @@ -22,7 +22,7 @@ struct foo { struct map_value { struct bpf_spin_lock lock; int data; - struct bpf_list_head head __contains(foo, node); + struct bpf_list_head head __contains(foo, node2); }; struct array_map { @@ -50,7 +50,7 @@ struct { #define private(name) SEC(".bss." 
#name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; -private(A) struct bpf_list_head ghead __contains(foo, node); +private(A) struct bpf_list_head ghead __contains(foo, node2); private(B) struct bpf_spin_lock glock2; #endif diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index 41978b46f58e..f4c63daba229 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -73,22 +73,21 @@ CHECK(inner_map, pop_back, &iv->head); int test##_missing_lock_##op(void *ctx) \ { \ INIT; \ - void (*p)(void *, void *) = (void *)&bpf_list_##op; \ - p(hexpr, nexpr); \ + bpf_list_##op(hexpr, nexpr); \ return 0; \ } -CHECK(kptr, push_front, &f->head, b); -CHECK(kptr, push_back, &f->head, b); +CHECK(kptr, push_front, &f->head, &b->node); +CHECK(kptr, push_back, &f->head, &b->node); -CHECK(global, push_front, &ghead, f); -CHECK(global, push_back, &ghead, f); +CHECK(global, push_front, &ghead, &f->node2); +CHECK(global, push_back, &ghead, &f->node2); -CHECK(map, push_front, &v->head, f); -CHECK(map, push_back, &v->head, f); +CHECK(map, push_front, &v->head, &f->node2); +CHECK(map, push_back, &v->head, &f->node2); -CHECK(inner_map, push_front, &iv->head, f); -CHECK(inner_map, push_back, &iv->head, f); +CHECK(inner_map, push_front, &iv->head, &f->node2); +CHECK(inner_map, push_back, &iv->head, &f->node2); #undef CHECK @@ -135,32 +134,31 @@ CHECK_OP(pop_back); int test##_incorrect_lock_##op(void *ctx) \ { \ INIT; \ - void (*p)(void *, void*) = (void *)&bpf_list_##op; \ bpf_spin_lock(lexpr); \ - p(hexpr, nexpr); \ + bpf_list_##op(hexpr, nexpr); \ return 0; \ } #define CHECK_OP(op) \ - CHECK(kptr_kptr, op, &f1->lock, &f2->head, b); \ - CHECK(kptr_global, op, &f1->lock, &ghead, f); \ - CHECK(kptr_map, op, &f1->lock, &v->head, f); \ - CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f); \ + CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \ + CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \ + CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \ + CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \ \ - CHECK(global_global, op, &glock2, &ghead, f); \ - CHECK(global_kptr, op, &glock, &f1->head, b); \ - CHECK(global_map, op, &glock, &v->head, f); \ - CHECK(global_inner_map, op, &glock, &iv->head, f); \ + CHECK(global_global, op, &glock2, &ghead, &f->node2); \ + CHECK(global_kptr, op, &glock, &f1->head, &b->node); \ + CHECK(global_map, op, &glock, &v->head, &f->node2); \ + CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \ \ - CHECK(map_map, op, &v->lock, &v2->head, f); \ - CHECK(map_kptr, op, &v->lock, &f2->head, b); \ - CHECK(map_global, op, &v->lock, &ghead, f); \ - CHECK(map_inner_map, op, &v->lock, &iv->head, f); \ + CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \ + CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \ + CHECK(map_global, op, &v->lock, &ghead, &f->node2); \ + CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \ \ - CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f); \ - CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b); \ - CHECK(inner_map_global, op, &iv->lock, &ghead, f); \ - CHECK(inner_map_map, op, &iv->lock, &v->head, f); + CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\ + CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \ + CHECK(inner_map_global, op, &iv->lock, &ghead, &f->node2); \ + CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2); 
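/* For readability, a hand-written expansion of one CHECK() instance above
 * (an illustration only, not part of this patch; it assumes the CHECK()
 * macro wraps its body in SEC("?tc") as elsewhere in this file). After the
 * change, the list kfunc is called directly rather than through a casted
 * function pointer, and the node argument is the actual list_node member:
 *
 *	SEC("?tc")
 *	int global_global_incorrect_lock_push_front(void *ctx)
 *	{
 *		INIT;
 *		bpf_spin_lock(&glock2);
 *		bpf_list_push_front(&ghead, &f->node2);
 *		return 0;
 *	}
 */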
CHECK_OP(push_front); CHECK_OP(push_back); @@ -340,7 +338,7 @@ int direct_read_node(void *ctx) f = bpf_obj_new(typeof(*f)); if (!f) return 0; - return *(int *)&f->node; + return *(int *)&f->node2; } SEC("?tc") @@ -351,12 +349,12 @@ int direct_write_node(void *ctx) f = bpf_obj_new(typeof(*f)); if (!f) return 0; - *(int *)&f->node = 0; + *(int *)&f->node2 = 0; return 0; } static __always_inline -int use_after_unlock(void (*op)(void *head, void *node)) +int use_after_unlock(bool push_front) { struct foo *f; @@ -365,7 +363,10 @@ int use_after_unlock(void (*op)(void *head, void *node)) return 0; bpf_spin_lock(&glock); f->data = 42; - op(&ghead, &f->node); + if (push_front) + bpf_list_push_front(&ghead, &f->node2); + else + bpf_list_push_back(&ghead, &f->node2); bpf_spin_unlock(&glock); return f->data; @@ -374,17 +375,17 @@ int use_after_unlock(void (*op)(void *head, void *node)) SEC("?tc") int use_after_unlock_push_front(void *ctx) { - return use_after_unlock((void *)bpf_list_push_front); + return use_after_unlock(true); } SEC("?tc") int use_after_unlock_push_back(void *ctx) { - return use_after_unlock((void *)bpf_list_push_back); + return use_after_unlock(false); } static __always_inline -int list_double_add(void (*op)(void *head, void *node)) +int list_double_add(bool push_front) { struct foo *f; @@ -392,8 +393,13 @@ int list_double_add(void (*op)(void *head, void *node)) if (!f) return 0; bpf_spin_lock(&glock); - op(&ghead, &f->node); - op(&ghead, &f->node); + if (push_front) { + bpf_list_push_front(&ghead, &f->node2); + bpf_list_push_front(&ghead, &f->node2); + } else { + bpf_list_push_back(&ghead, &f->node2); + bpf_list_push_back(&ghead, &f->node2); + } bpf_spin_unlock(&glock); return 0; @@ -402,13 +408,13 @@ int list_double_add(void (*op)(void *head, void *node)) SEC("?tc") int double_push_front(void *ctx) { - return list_double_add((void *)bpf_list_push_front); + return list_double_add(true); } SEC("?tc") int double_push_back(void *ctx) { - return list_double_add((void *)bpf_list_push_back); + return list_double_add(false); } SEC("?tc") @@ -450,7 +456,7 @@ int incorrect_node_var_off(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, (void *)&f->node + ctx->protocol); + bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol); bpf_spin_unlock(&glock); return 0; @@ -465,7 +471,7 @@ int incorrect_node_off1(void *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, (void *)&f->node + 1); + bpf_list_push_front(&ghead, (void *)&f->node2 + 1); bpf_spin_unlock(&glock); return 0; @@ -480,7 +486,7 @@ int incorrect_node_off2(void *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, &f->node2); + bpf_list_push_front(&ghead, &f->node); bpf_spin_unlock(&glock); return 0; @@ -510,7 +516,7 @@ int incorrect_head_var_off1(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node); + bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2); bpf_spin_unlock(&glock); return 0; @@ -525,7 +531,7 @@ int incorrect_head_var_off2(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node); + bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2); bpf_spin_unlock(&glock); return 0; @@ -563,7 +569,7 @@ int incorrect_head_off2(void *ctx) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&ghead + 1, &f->node); + bpf_list_push_front((void *)&ghead + 1, 
&f->node2); bpf_spin_unlock(&glock); return 0; diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index dae5dab1bbf7..d7150041e5d1 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -115,8 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps); extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; @@ -187,25 +185,10 @@ static void test_kptr_ref(struct map_value *v) bpf_kfunc_call_test_release(p); } -static void test_kptr_get(struct map_value *v) -{ - struct prog_test_ref_kfunc *p; - - p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - if (!p) - return; - if (p->a + p->b > 100) { - bpf_kfunc_call_test_release(p); - return; - } - bpf_kfunc_call_test_release(p); -} - static void test_kptr(struct map_value *v) { test_kptr_unref(v); test_kptr_ref(v); - test_kptr_get(v); } SEC("tc") @@ -338,38 +321,25 @@ int test_map_kptr_ref_pre(struct map_value *v) if (p_st->cnt.refs.counter != ref) return 4; - p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - if (!p) - return 5; - ref++; - if (p_st->cnt.refs.counter != ref) { - ret = 6; - goto end; - } - bpf_kfunc_call_test_release(p); - ref--; - if (p_st->cnt.refs.counter != ref) - return 7; - p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) - return 8; + return 5; bpf_kfunc_call_test_release(p); ref--; if (p_st->cnt.refs.counter != ref) - return 9; + return 6; p = bpf_kfunc_call_test_acquire(&arg); if (!p) - return 10; + return 7; ref++; p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { - ret = 11; + ret = 8; goto end; } if (p_st->cnt.refs.counter != ref) - return 12; + return 9; /* Leave in map */ return 0; diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 15bf3127dba3..da8c724f839b 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -21,8 +21,6 @@ struct array_map { extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -extern struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; SEC("?tc") __failure __msg("kptr access size must be BPF_DW") @@ -221,67 +219,6 @@ int reject_kptr_xchg_on_unref(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("arg#0 expected pointer to map value") -int reject_kptr_get_no_map_val(struct __sk_buff *ctx) -{ - bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 expected pointer to map value") -int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx) -{ - bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 no referenced kptr at map value offset=0") -int reject_kptr_get_no_kptr(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get((void *)v, 0, 0); - 
return 0; -} - -SEC("?tc") -__failure __msg("arg#0 no referenced kptr at map value offset=8") -int reject_kptr_get_on_unref(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("kernel function bpf_kfunc_call_test_kptr_get args#0") -int reject_kptr_get_bad_type_match(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0); - return 0; -} - -SEC("?tc") __failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_") int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx) { @@ -429,21 +366,6 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=") -int kptr_get_ref_state(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - return 0; -} - -SEC("?tc") __failure __msg("Possibly NULL pointer passed to helper arg2") int kptr_xchg_possibly_null(struct __sk_buff *ctx) { diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index 4c90aa6abddd..b09f4fffe57c 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -93,9 +93,11 @@ long rbtree_add_and_remove(void *ctx) res = bpf_rbtree_remove(&groot, &n->node); bpf_spin_unlock(&glock); + if (!res) + return 1; + n = container_of(res, struct node_data, node); removed_key = n->key; - bpf_obj_drop(n); return 0; @@ -148,9 +150,11 @@ long rbtree_first_and_remove(void *ctx) res = bpf_rbtree_remove(&groot, &o->node); bpf_spin_unlock(&glock); + if (!res) + return 5; + o = container_of(res, struct node_data, node); removed_key = o->key; - bpf_obj_drop(o); bpf_spin_lock(&glock); @@ -173,4 +177,70 @@ err_out: return 1; } +SEC("tc") +long rbtree_api_release_aliasing(void *ctx) +{ + struct node_data *n, *m, *o; + struct bpf_rb_node *res, *res2; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + n->key = 41; + n->data = 42; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + bpf_spin_lock(&glock); + + /* m and o point to the same node, + * but verifier doesn't know this + */ + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + o = container_of(res, struct node_data, node); + + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + m = container_of(res, struct node_data, node); + + res = bpf_rbtree_remove(&groot, &m->node); + /* Retval of previous remove returns an owning reference to m, + * which is the same node non-owning ref o is pointing at. + * We can safely try to remove o as the second rbtree_remove will + * return NULL since the node isn't in a tree. + * + * Previously we relied on the verifier type system + rbtree_remove + * invalidating non-owning refs to ensure that rbtree_remove couldn't + * fail, but now rbtree_remove does runtime checking so we no longer + * invalidate non-owning refs after remove. 
+ */ + res2 = bpf_rbtree_remove(&groot, &o->node); + + bpf_spin_unlock(&glock); + + if (res) { + o = container_of(res, struct node_data, node); + first_data[0] = o->data; + bpf_obj_drop(o); + } + if (res2) { + /* The second remove fails, so res2 is null and this doesn't + * execute + */ + m = container_of(res2, struct node_data, node); + first_data[1] = m->data; + bpf_obj_drop(m); + } + return 0; + +err_out: + bpf_spin_unlock(&glock); + return 1; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index 46d7d18a218f..3fecf1c6dfe5 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=2 alloc_insn=10") +__failure __msg("Unreleased reference id=3 alloc_insn=10") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; @@ -118,11 +118,13 @@ long rbtree_api_remove_no_drop(void *ctx) res = bpf_rbtree_remove(&groot, res); - n = container_of(res, struct node_data, node); - __sink(n); + if (res) { + n = container_of(res, struct node_data, node); + __sink(n); + } bpf_spin_unlock(&glock); - /* bpf_obj_drop(n) is missing here */ + /* if (res) { bpf_obj_drop(n); } is missing here */ return 0; unlock_err: @@ -150,35 +152,36 @@ long rbtree_api_add_to_multiple_trees(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") -long rbtree_api_add_release_unlock_escape(void *ctx) +__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed") +long rbtree_api_use_unchecked_remove_retval(void *ctx) { - struct node_data *n; - - n = bpf_obj_new(typeof(*n)); - if (!n) - return 1; + struct bpf_rb_node *res; bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); + + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + res = bpf_rbtree_remove(&groot, res); + bpf_spin_unlock(&glock); bpf_spin_lock(&glock); - /* After add() in previous critical section, n should be - * release_on_unlock and released after previous spin_unlock, - * so should not be possible to use it here - */ - bpf_rbtree_remove(&groot, &n->node); + /* Must check res for NULL before using in rbtree_add below */ + bpf_rbtree_add(&groot, res, less); bpf_spin_unlock(&glock); return 0; + +err_out: + bpf_spin_unlock(&glock); + return 1; } SEC("?tc") __failure __msg("rbtree_remove node input must be non-owning ref") -long rbtree_api_release_aliasing(void *ctx) +long rbtree_api_add_release_unlock_escape(void *ctx) { - struct node_data *n, *m, *o; - struct bpf_rb_node *res; + struct node_data *n; n = bpf_obj_new(typeof(*n)); if (!n) @@ -189,37 +192,11 @@ long rbtree_api_release_aliasing(void *ctx) bpf_spin_unlock(&glock); bpf_spin_lock(&glock); - - /* m and o point to the same node, - * but verifier doesn't know this - */ - res = bpf_rbtree_first(&groot); - if (!res) - return 1; - o = container_of(res, struct node_data, node); - - res = bpf_rbtree_first(&groot); - if (!res) - return 1; - m = container_of(res, struct node_data, node); - - bpf_rbtree_remove(&groot, &m->node); - /* This second remove shouldn't be possible. 
Retval of previous - * remove returns owning reference to m, which is the same - * node o's non-owning ref is pointing at - * - * In order to preserve property - * * owning ref must not be in rbtree - * * non-owning ref must be in rbtree - * - * o's ref must be invalidated after previous remove. Otherwise - * we'd have non-owning ref to node that isn't in rbtree, and - * verifier wouldn't be able to use type system to prevent remove - * of ref that already isn't in any tree. Would have to do runtime - * checks in that case. + /* After add() in previous critical section, n should be + * release_on_unlock and released after previous spin_unlock, + * so should not be possible to use it here */ - bpf_rbtree_remove(&groot, &o->node); - + bpf_rbtree_remove(&groot, &n->node); bpf_spin_unlock(&glock); return 0; } diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c new file mode 100644 index 000000000000..1d348a225140 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + long key; + long list_data; + struct bpf_rb_node r; + struct bpf_list_node l; + struct bpf_refcount ref; +}; + +struct map_value { + struct node_data __kptr *node; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} stashed_nodes SEC(".maps"); + +struct node_acquire { + long key; + long data; + struct bpf_rb_node node; + struct bpf_refcount refcount; +}; + +#define private(name) SEC(".bss." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock lock; +private(A) struct bpf_rb_root root __contains(node_data, r); +private(A) struct bpf_list_head head __contains(node_data, l); + +private(B) struct bpf_spin_lock alock; +private(B) struct bpf_rb_root aroot __contains(node_acquire, node); + +static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b) +{ + struct node_data *a; + struct node_data *b; + + a = container_of(node_a, struct node_data, r); + b = container_of(node_b, struct node_data, r); + + return a->key < b->key; +} + +static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_acquire *node_a; + struct node_acquire *node_b; + + node_a = container_of(a, struct node_acquire, node); + node_b = container_of(b, struct node_acquire, node); + + return node_a->key < node_b->key; +} + +static long __insert_in_tree_and_list(struct bpf_list_head *head, + struct bpf_rb_root *root, + struct bpf_spin_lock *lock) +{ + struct node_data *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return -1; + + m = bpf_refcount_acquire(n); + m->key = 123; + m->list_data = 456; + + bpf_spin_lock(lock); + if (bpf_rbtree_add(root, &n->r, less)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + bpf_obj_drop(m); + return -2; + } + bpf_spin_unlock(lock); + + bpf_spin_lock(lock); + if (bpf_list_push_front(head, &m->l)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + return -3; + } + bpf_spin_unlock(lock); + return 0; +} + +static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root, + struct bpf_spin_lock *lock) +{ + struct map_value *mapval; + struct node_data *n, *m; + + mapval = bpf_map_lookup_elem(&stashed_nodes, &idx); + if (!mapval) + return -1; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return -2; + + n->key = val; + m = bpf_refcount_acquire(n); + + n = bpf_kptr_xchg(&mapval->node, n); + if (n) { + bpf_obj_drop(n); + bpf_obj_drop(m); + return -3; + } + + bpf_spin_lock(lock); + if (bpf_rbtree_add(root, &m->r, less)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + return -4; + } + bpf_spin_unlock(lock); + return 0; +} + +static long __read_from_tree(struct bpf_rb_root *root, + struct bpf_spin_lock *lock, + bool remove_from_tree) +{ + struct bpf_rb_node *rb; + struct node_data *n; + long res = -99; + + bpf_spin_lock(lock); + + rb = bpf_rbtree_first(root); + if (!rb) { + bpf_spin_unlock(lock); + return -1; + } + + n = container_of(rb, struct node_data, r); + res = n->key; + + if (!remove_from_tree) { + bpf_spin_unlock(lock); + return res; + } + + rb = bpf_rbtree_remove(root, rb); + bpf_spin_unlock(lock); + if (!rb) + return -2; + n = container_of(rb, struct node_data, r); + bpf_obj_drop(n); + return res; +} + +static long __read_from_list(struct bpf_list_head *head, + struct bpf_spin_lock *lock, + bool remove_from_list) +{ + struct bpf_list_node *l; + struct node_data *n; + long res = -99; + + bpf_spin_lock(lock); + + l = bpf_list_pop_front(head); + if (!l) { + bpf_spin_unlock(lock); + return -1; + } + + n = container_of(l, struct node_data, l); + res = n->list_data; + + if (!remove_from_list) { + if (bpf_list_push_back(head, &n->l)) { + bpf_spin_unlock(lock); + return -2; + } + } + + bpf_spin_unlock(lock); + + if (remove_from_list) + bpf_obj_drop(n); + return res; +} + +static long __read_from_unstash(int idx) +{ + struct node_data *n = NULL; + struct map_value *mapval; + long val = -99; + + mapval = bpf_map_lookup_elem(&stashed_nodes, &idx); + if 
(!mapval) + return -1; + + n = bpf_kptr_xchg(&mapval->node, n); + if (!n) + return -2; + + val = n->key; + bpf_obj_drop(n); + return val; +} + +#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(579) \ +long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \ +{ \ + long err, tree_data, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + err = __read_from_list(&head, &lock, rem_list); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + return tree_data + list_data; \ +} + +/* After successful insert of struct node_data into both collections: + * - it should have refcount = 2 + * - removing / not removing the node_data from a collection after + * reading should have no effect on ability to read / remove from + * the other collection + */ +INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list"); +INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither"); +INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree"); +INSERT_READ_BOTH(false, true, "insert_read_both: remove from list"); + +#undef INSERT_READ_BOTH +#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(579) \ +long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \ +{ \ + long err, tree_data, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_list(&head, &lock, rem_list); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + return tree_data + list_data; \ +} + +/* Similar to insert_read_both, but list data is read and possibly removed + * first + * + * Results should be no different than reading and possibly removing rbtree + * node first + */ +INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list"); +INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither"); +INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree"); +INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list"); + +#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(-1) \ +long insert_double_##read_fn##_and_del_##read_root(void *ctx) \ +{ \ + long err, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = read_fn(&read_root, &lock, true); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + err = read_fn(&read_root, &lock, true); \ + if (err < 0) \ + return err; \ + \ + return err + list_data; \ +} + +/* Insert into both tree and list, then try reading-and-removing from either twice + * + * The second read-and-remove should fail on read step since the node has + * already been removed + */ +INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree"); +INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list"); + +#define INSERT_STASH_READ(rem_tree, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(84) \ +long 
insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \ +{ \ + long err, tree_data, map_data; \ + \ + err = __stash_map_insert_tree(0, 42, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + err = __read_from_unstash(0); \ + if (err < 0) \ + return err; \ + else \ + map_data = err; \ + \ + return tree_data + map_data; \ +} + +/* Stash a refcounted node in map_val, insert same node into tree, then try + * reading data from tree then unstashed map_val, possibly removing from tree + * + * Removing from tree should have no effect on map_val kptr validity + */ +INSERT_STASH_READ(true, "insert_stash_read: remove from tree"); +INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree"); + +SEC("tc") +__success +long rbtree_refcounted_node_ref_escapes(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&alock); + bpf_rbtree_add(&aroot, &n->node, less_a); + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&alock); + + m->key = 2; + bpf_obj_drop(m); + return 0; +} + +SEC("tc") +__success +long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + m = bpf_refcount_acquire(n); + m->key = 2; + + bpf_spin_lock(&alock); + bpf_rbtree_add(&aroot, &n->node, less_a); + bpf_spin_unlock(&alock); + + bpf_obj_drop(m); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c new file mode 100644 index 000000000000..efcb308f80ad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" +#include "bpf_misc.h" + +struct node_acquire { + long key; + long data; + struct bpf_rb_node node; + struct bpf_refcount refcount; +}; + +#define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_acquire, node); + +static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_acquire *node_a; + struct node_acquire *node_b; + + node_a = container_of(a, struct node_acquire, node); + node_b = container_of(b, struct node_acquire, node); + + return node_a->key < node_b->key; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=21") +long rbtree_refcounted_node_ref_escapes(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + /* m becomes an owning ref but is never drop'd or added to a tree */ + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&glock); + + m->key = 2; + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=9") +long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + /* m becomes an owning ref but is never drop'd or added to a tree */ + m = bpf_refcount_acquire(n); + m->key = 2; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index c8d810010a94..fe1df4cd206e 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -32,6 +32,12 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. */ @@ -131,6 +137,12 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. 
*/ diff --git a/tools/testing/selftests/bpf/progs/test_access_variable_array.c b/tools/testing/selftests/bpf/progs/test_access_variable_array.c new file mode 100644 index 000000000000..808c49b79889 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_access_variable_array.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +unsigned long span = 0; + +SEC("fentry/load_balance") +int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq, + struct sched_domain *sd) +{ + span = sd->span[0]; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c index 60450cb0e72e..1bd48feaaa42 100644 --- a/tools/testing/selftests/bpf/progs/test_log_fixup.c +++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c @@ -61,4 +61,14 @@ int use_missing_map(const void *ctx) return value != NULL; } +extern int bpf_nonexistent_kfunc(void) __ksym __weak; + +SEC("?raw_tp/sys_enter") +int use_missing_kfunc(const void *ctx) +{ + bpf_nonexistent_kfunc(); + + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c index e718f0ebee7d..c435a3a8328a 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -13,12 +13,18 @@ struct bpf_testmod_struct_arg_2 { long b; }; +struct bpf_testmod_struct_arg_3 { + int a; + int b[]; +}; + long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; __u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; long t5_ret; +int t6; SEC("fentry/bpf_testmod_test_struct_arg_1") int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c) @@ -117,4 +123,11 @@ int BPF_PROG2(test_struct_arg_10, int, ret) return 0; } +SEC("fentry/bpf_testmod_test_struct_arg_6") +int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a) +{ + t6 = a->b[0]; + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c new file mode 100644 index 000000000000..c5588a14fe2e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -0,0 +1,1076 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("subtraction bounds (map value) variant 1") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__naked void bounds_map_value_variant_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + if r1 > 0xff goto l0_%=; \ + r3 = *(u8*)(r0 + 1); \ + if r3 > 0xff goto l0_%=; \ + r1 -= r3; \ + r1 >>= 56; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + 
__imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("subtraction bounds (map value) variant 2") +__failure +__msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.") +__msg_unpriv("R1 has unknown scalar with mixed signed bounds") +__naked void bounds_map_value_variant_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + if r1 > 0xff goto l0_%=; \ + r3 = *(u8*)(r0 + 1); \ + if r3 > 0xff goto l0_%=; \ + r1 -= r3; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("check subtraction on pointers for unpriv") +__success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited") +__retval(0) +__naked void subtraction_on_pointers_for_unpriv(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = %[map_hash_8b] ll; \ + r2 = r10; \ + r2 += -8; \ + r6 = 9; \ + *(u64*)(r2 + 0) = r6; \ + call %[bpf_map_lookup_elem]; \ + r9 = r10; \ + r9 -= r0; \ + r1 = %[map_hash_8b] ll; \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: *(u64*)(r0 + 0) = r9; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on zero-extended MOV") +__success __success_unpriv __retval(0) +__naked void based_on_zero_extended_mov(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0x0000'0000'ffff'ffff */ \ + w2 = 0xffffffff; \ + /* r2 = 0 */ \ + r2 >>= 32; \ + /* no-op */ \ + r0 += r2; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on sign-extended MOV. test1") +__failure __msg("map_value pointer and 4294967295") +__failure_unpriv +__naked void on_sign_extended_mov_test1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0xffff'ffff'ffff'ffff */ \ + r2 = 0xffffffff; \ + /* r2 = 0xffff'ffff */ \ + r2 >>= 32; \ + /* r0 = <oob pointer> */ \ + r0 += r2; \ + /* access to OOB pointer */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on sign-extended MOV. 
test2") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__naked void on_sign_extended_mov_test2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0xffff'ffff'ffff'ffff */ \ + r2 = 0xffffffff; \ + /* r2 = 0xfff'ffff */ \ + r2 >>= 36; \ + /* r0 = <oob pointer> */ \ + r0 += r2; \ + /* access to OOB pointer */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("bounds check based on reg_off + var_off + insn_off. test1") +__failure __msg("value_size=8 off=1073741825") +__naked void var_off_insn_off_test1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r6 &= 1; \ + r6 += %[__imm_0]; \ + r0 += r6; \ + r0 += %[__imm_0]; \ +l0_%=: r0 = *(u8*)(r0 + 3); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("bounds check based on reg_off + var_off + insn_off. test2") +__failure __msg("value 1073741823") +__naked void var_off_insn_off_test2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r6 &= 1; \ + r6 += %[__imm_0]; \ + r0 += r6; \ + r0 += %[__imm_1]; \ +l0_%=: r0 = *(u8*)(r0 + 3); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, (1 << 30) - 1), + __imm_const(__imm_1, (1 << 29) - 1), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of non-boundary-crossing range") +__success __success_unpriv __retval(0) +__naked void of_non_boundary_crossing_range(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r2 = 1; \ + /* r2 = 0x10'0000'0000 */ \ + r2 <<= 36; \ + /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \ + r1 += r2; \ + /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \ + r1 += 0x7fffffff; \ + /* r1 = [0x00, 0xff] */ \ + w1 -= 0x7fffffff; \ + /* r1 = 0 */ \ + r1 >>= 8; \ + /* no-op */ \ + r0 += r1; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of boundary-crossing range (1)") +__failure +/* not actually fully unbounded, but the bound is very high */ +__msg("value -4294967168 makes map_value pointer be out of bounds") +__failure_unpriv +__naked void of_boundary_crossing_range_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r1 += 
%[__imm_0]; \ + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0xffff'ffff] or \ + * [0x0000'0000, 0x0000'007f] \ + */ \ + w1 += 0; \ + r1 -= %[__imm_0]; \ + /* r1 = [0x00, 0xff] or \ + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ + */ \ + r1 -= %[__imm_0]; \ + /* error on OOB pointer computation */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 0xffffff80 >> 1) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of boundary-crossing range (2)") +__failure __msg("value -4294967168 makes map_value pointer be out of bounds") +__failure_unpriv +__naked void of_boundary_crossing_range_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0xffff'ffff] or \ + * [0x0000'0000, 0x0000'007f] \ + * difference to previous test: truncation via MOV32\ + * instead of ALU32. \ + */ \ + w1 = w1; \ + r1 -= %[__imm_0]; \ + /* r1 = [0x00, 0xff] or \ + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ + */ \ + r1 -= %[__imm_0]; \ + /* error on OOB pointer computation */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 0xffffff80 >> 1) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after wrapping 32-bit addition") +__success __success_unpriv __retval(0) +__naked void after_wrapping_32_bit_addition(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = 0x7fff'ffff */ \ + r1 = 0x7fffffff; \ + /* r1 = 0xffff'fffe */ \ + r1 += 0x7fffffff; \ + /* r1 = 0 */ \ + w1 += 2; \ + /* no-op */ \ + r0 += r1; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after shift with oversized count operand") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__naked void shift_with_oversized_count_operand(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = 32; \ + r1 = 1; \ + /* r1 = (u32)1 << (u32)32 = ? 
*/ \ + w1 <<= w2; \ + /* r1 = [0x0000, 0xffff] */ \ + r1 &= 0xffff; \ + /* computes unknown pointer, potentially OOB */ \ + r0 += r1; \ + /* potentially OOB access */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after right shift of maybe-negative number") +__failure __msg("R0 unbounded memory access") +__failure_unpriv +__naked void shift_of_maybe_negative_number(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + /* r1 = [-0x01, 0xfe] */ \ + r1 -= 1; \ + /* r1 = 0 or 0xff'ffff'ffff'ffff */ \ + r1 >>= 8; \ + /* r1 = 0 or 0xffff'ffff'ffff */ \ + r1 >>= 8; \ + /* computes unknown pointer, potentially OOB */ \ + r0 += r1; \ + /* potentially OOB access */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after 32-bit right shift with 64-bit input") +__failure __msg("math between map_value pointer and 4294967294 is not allowed") +__failure_unpriv +__naked void shift_with_64_bit_input(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 2; \ + /* r1 = 1<<32 */ \ + r1 <<= 31; \ + /* r1 = 0 (NOT 2!) */ \ + w1 >>= 31; \ + /* r1 = 0xffff'fffe (NOT 0!) */ \ + w1 -= 2; \ + /* error on computing OOB pointer */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test1") +__failure __msg("map_value pointer and 2147483646") +__failure_unpriv +__naked void size_signed_32bit_overflow_test1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 += 0x7ffffffe; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test2") +__failure __msg("pointer offset 1073741822") +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void size_signed_32bit_overflow_test2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 += 0x1fffffff; \ + r0 += 0x1fffffff; \ + r0 += 0x1fffffff; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. 
test3") +__failure __msg("pointer offset -1073741822") +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void size_signed_32bit_overflow_test3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 -= 0x1fffffff; \ + r0 -= 0x1fffffff; \ + r0 = *(u64*)(r0 + 2); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test4") +__failure __msg("map_value pointer and 1000000000000") +__failure_unpriv +__naked void size_signed_32bit_overflow_test4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 1000000; \ + r1 *= 1000000; \ + r0 += r1; \ + r0 = *(u64*)(r0 + 2); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check mixed 32bit and 64bit arithmetic. test1") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void _32bit_and_64bit_arithmetic_test1(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = -1; \ + r1 <<= 32; \ + r1 += 1; \ + /* r1 = 0xffffFFFF00000001 */ \ + if w1 > 1 goto l0_%=; \ + /* check ALU64 op keeps 32bit bounds */ \ + r1 += 1; \ + if w1 > 2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: /* invalid ldx if bounds are lost above */ \ + r0 = *(u64*)(r0 - 1); \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("bounds check mixed 32bit and 64bit arithmetic. 
test2") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void _32bit_and_64bit_arithmetic_test2(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = -1; \ + r1 <<= 32; \ + r1 += 1; \ + /* r1 = 0xffffFFFF00000001 */ \ + r2 = 3; \ + /* r1 = 0x2 */ \ + w1 += 1; \ + /* check ALU32 op zero extends 64bit bounds */ \ + if r1 > r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: /* invalid ldx if bounds are lost above */ \ + r0 = *(u64*)(r0 - 1); \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("assigning 32bit bounds to 64bit for wA = 0, wB = wA") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void for_wa_0_wb_wa(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + w9 = 0; \ + w2 = w9; \ + r6 = r7; \ + r6 += r2; \ + r3 = r6; \ + r3 += 8; \ + if r3 > r8 goto l0_%=; \ + r5 = *(u32*)(r6 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = 0, reg xor 1") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_0_reg_xor_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 0; \ + r1 ^= 1; \ + if r1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg32 = 0, reg32 xor 1") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg32_0_reg32_xor_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: w1 = 0; \ + w1 ^= 1; \ + if w1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = 2, reg xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_2_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 2; \ + r1 ^= 3; \ + if r1 > 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = any, reg xor 3") +__failure __msg("invalid access to map value") +__msg_unpriv("invalid access to map value") +__naked void reg_any_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + r1 ^= 3; \ + if r1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + 
+SEC("socket") +__description("bounds check for reg32 = any, reg32 xor 3") +__failure __msg("invalid access to map value") +__msg_unpriv("invalid access to map value") +__naked void reg32_any_reg32_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + w1 ^= 3; \ + if w1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg > 0, reg xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_0_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + if r1 <= 0 goto l1_%=; \ + r1 ^= 3; \ + if r1 >= 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg32 > 0, reg32 xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg32_0_reg32_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + if w1 <= 0 goto l1_%=; \ + w1 ^= 3; \ + if w1 >= 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks after 32-bit truncation. test 1") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void _32_bit_truncation_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + /* This used to reduce the max bound to 0x7fffffff */\ + if r1 == 0 goto l1_%=; \ + if r1 > 0x7fffffff goto l0_%=; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks after 32-bit truncation. 
test 2") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void _32_bit_truncation_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 s< 1 goto l1_%=; \ + if w1 s< 0 goto l0_%=; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP_JLT for crossing 64-bit signed boundary") +__success __retval(0) +__naked void crossing_64_bit_signed_boundary_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x7fffffffffffff10 ll; \ + r1 += r0; \ + r0 = 0x8000000000000000 ll; \ +l1_%=: r0 += 1; \ + /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\ + if r0 < r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP_JSLT for crossing 64-bit signed boundary") +__success __retval(0) +__naked void crossing_64_bit_signed_boundary_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x7fffffffffffff10 ll; \ + r1 += r0; \ + r2 = 0x8000000000000fff ll; \ + r0 = 0x8000000000000000 ll; \ +l1_%=: r0 += 1; \ + if r0 s> r2 goto l0_%=; \ + /* r1 signed range is [S64_MIN, S64_MAX] */ \ + if r0 s< r1 goto l1_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check for loop upper bound greater than U32_MAX") +__success __retval(0) +__naked void bound_greater_than_u32_max(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x100000000 ll; \ + r1 += r0; \ + r0 = 0x100000000 ll; \ +l1_%=: r0 += 1; \ + if r0 < r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP32_JLT for crossing 32-bit signed boundary") +__success __retval(0) +__naked void crossing_32_bit_signed_boundary_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + w0 = 0x7fffff10; \ + w1 += w0; \ + w0 = 0x80000000; \ +l1_%=: w0 += 1; \ + /* r1 unsigned range is [0, 0x8000000f] */ \ + if w0 < w1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") +__success __retval(0) +__naked void crossing_32_bit_signed_boundary_2(void) +{ + asm volatile (" \ + r2 = 
*(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + w0 = 0x7fffff10; \ + w1 += w0; \ + w2 = 0x80000fff; \ + w0 = 0x80000000; \ +l1_%=: w0 += 1; \ + if w0 s> w2 goto l0_%=; \ + /* r1 signed range is [S32_MIN, S32_MAX] */ \ + if w0 s< w1 goto l1_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c new file mode 100644 index 000000000000..325a2bab4a71 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("tracepoint") +__description("bpf_get_stack return R0 within range") +__success +__naked void stack_return_r0_within_range(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + r9 = %[__imm_0]; \ + r1 = r6; \ + r2 = r7; \ + r3 = %[__imm_0]; \ + r4 = 256; \ + call %[bpf_get_stack]; \ + r1 = 0; \ + r8 = r0; \ + r8 <<= 32; \ + r8 s>>= 32; \ + if r1 s> r8 goto l0_%=; \ + r9 -= r8; \ + r2 = r7; \ + r2 += r8; \ + r1 = r9; \ + r1 <<= 32; \ + r1 s>>= 32; \ + r3 = r2; \ + r3 += r1; \ + r1 = r7; \ + r5 = %[__imm_0]; \ + r1 += r5; \ + if r3 >= r1 goto l0_%=; \ + r1 = r6; \ + r3 = r9; \ + r4 = 0; \ + call %[bpf_get_stack]; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_stack), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) / 2) + : __clobber_all); +} + +SEC("iter/task") +__description("bpf_get_task_stack return R0 range is refined") +__success +__naked void return_r0_range_is_refined(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 0); \ + r6 = *(u64*)(r6 + 0); /* ctx->meta->seq */\ + r7 = *(u64*)(r1 + 8); /* ctx->task */\ + r1 = %[map_array_48b] ll; /* fixup_map_array_48b */\ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: if r7 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r1 = r7; \ + r2 = r0; \ + r9 = r0; /* keep buf for seq_write */\ + r3 = 48; \ + r4 = 0; \ + call %[bpf_get_task_stack]; \ + if r0 s> 0 goto l2_%=; \ + r0 = 0; \ + exit; \ +l2_%=: r1 = r6; \ + r2 = r9; \ + r3 = r0; \ + call %[bpf_seq_write]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_task_stack), + __imm(bpf_map_lookup_elem), + __imm(bpf_seq_write), + __imm_addr(map_array_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c 
b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c new file mode 100644 index 000000000000..a570e48b917a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/btf_ctx_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("fentry/bpf_modify_return_test") +__description("btf_ctx_access accept") +__success __retval(0) +__naked void btf_ctx_access_accept(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 8); /* load 2nd argument value (int pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test9") +__description("btf_ctx_access u32 pointer accept") +__success __retval(0) +__naked void ctx_access_u32_pointer_accept(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 0); /* load 1nd argument value (u32 pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c new file mode 100644 index 000000000000..a83809a1dbbf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ctx.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("tc") +__description("context stores via BPF_ATOMIC") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__naked void context_stores_via_bpf_atomic(void) +{ + asm volatile (" \ + r0 = 0; \ + lock *(u32 *)(r1 + %[__sk_buff_mark]) += w0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("arithmetic ops make PTR_TO_CTX unusable") +__failure __msg("dereference of modified ctx ptr") +__naked void make_ptr_to_ctx_unusable(void) +{ + asm volatile (" \ + r1 += %[__imm_0]; \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, + offsetof(struct __sk_buff, data) - offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("pass unmodified ctx pointer to helper") +__success __retval(0) +__naked void unmodified_ctx_pointer_to_helper(void) +{ + asm volatile (" \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("tc") +__description("pass modified ctx pointer to helper, 1") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__naked void ctx_pointer_to_helper_1(void) +{ + asm volatile (" \ + r1 += -612; \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("socket") +__description("pass modified ctx pointer to helper, 2") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed") +__naked void ctx_pointer_to_helper_2(void) +{ + asm volatile (" \ + r1 += -612; \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +SEC("tc") +__description("pass modified ctx pointer to helper, 3") +__failure __msg("variable ctx access var_off=(0x0; 0x4)") +__naked void ctx_pointer_to_helper_3(void) +{ + asm volatile (" \ + r3 = *(u32*)(r1 + 0); \ 
+ r3 &= 4; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 1: ctx") +__success +__naked void or_null_check_1_ctx(void) +{ + asm volatile (" \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 2: null") +__success +__naked void or_null_check_2_null(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 3: 1") +__failure __msg("R1 type=scalar expected=ctx") +__naked void or_null_check_3_1(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 4: ctx - const") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__naked void null_check_4_ctx_const(void) +{ + asm volatile (" \ + r1 += -612; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/connect4") +__description("pass ctx or null check, 5: null (connect)") +__success +__naked void null_check_5_null_connect(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 6: null (bind)") +__success +__naked void null_check_6_null_bind(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 7: ctx (bind)") +__success +__naked void null_check_7_ctx_bind(void) +{ + asm volatile (" \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 8: null (bind)") +__failure __msg("R1 type=scalar expected=ctx") +__naked void null_check_8_null_bind(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_d_path.c b/tools/testing/selftests/bpf/progs/verifier_d_path.c new file mode 100644 index 000000000000..ec79cbcfde91 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_d_path.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/d_path.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("fentry/dentry_open") +__description("d_path accept") +__success __retval(0) +__naked void d_path_accept(void) +{ + asm volatile (" \ + r1 = *(u32*)(r1 + 0); \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + r3 = 8 ll; \ + call %[bpf_d_path]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_d_path) + : __clobber_all); +} + +SEC("fentry/d_path") +__description("d_path reject") +__failure __msg("helper call is not allowed in probe") +__naked void d_path_reject(void) +{ + asm volatile (" \ + r1 = 
*(u32*)(r1 + 0); \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + r3 = 8 ll; \ + call %[bpf_d_path]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_d_path) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c new file mode 100644 index 000000000000..99a23dea8233 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("tc") +__description("pkt_end - pkt_start is allowed") +__success __retval(TEST_DATA_LEN) +__naked void end_pkt_start_is_allowed(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r0 -= r2; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test1") +__success __retval(0) +__naked void direct_packet_access_test1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test2") +__success __retval(0) +__naked void direct_packet_access_test2(void) +{ + asm volatile (" \ + r0 = 1; \ + r4 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data]); \ + r5 = r3; \ + r5 += 14; \ + if r5 > r4 goto l0_%=; \ + r0 = *(u8*)(r3 + 7); \ + r4 = *(u8*)(r3 + 12); \ + r4 *= 14; \ + r3 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 += r4; \ + r2 = *(u32*)(r1 + %[__sk_buff_len]); \ + r2 <<= 49; \ + r2 >>= 49; \ + r3 += r2; \ + r2 = r3; \ + r2 += 8; \ + r1 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + if r2 > r1 goto l1_%=; \ + r1 = *(u8*)(r3 + 4); \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("direct packet access: test3") +__failure __msg("invalid bpf_context access off=76") +__failure_unpriv +__naked void direct_packet_access_test3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test4 (write)") +__success __retval(0) +__naked void direct_packet_access_test4_write(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test5 (pkt_end 
>= reg, good access)") +__success __retval(0) +__naked void pkt_end_reg_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test6 (pkt_end >= reg, bad access)") +__failure __msg("invalid access to packet") +__naked void pkt_end_reg_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test7 (pkt_end >= reg, both accesses)") +__failure __msg("invalid access to packet") +__naked void pkt_end_reg_both_accesses(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test8 (double test, variant 1)") +__success __retval(0) +__naked void test8_double_test_variant_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + if r0 > r3 goto l1_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test9 (double test, variant 2)") +__success __retval(0) +__naked void test9_double_test_variant_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = 1; \ + exit; \ +l0_%=: if r0 > r3 goto l1_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test10 (write invalid)") +__failure __msg("invalid access to packet") +__naked void packet_access_test10_write_invalid(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: *(u8*)(r2 + 0) = r2; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct 
packet access: test11 (shift, good access)") +__success __retval(1) +__naked void access_test11_shift_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = 144; \ + r5 = r3; \ + r5 += 23; \ + r5 >>= 3; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test12 (and, good access)") +__success __retval(1) +__naked void access_test12_and_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = 144; \ + r5 = r3; \ + r5 += 23; \ + r5 &= 15; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test13 (branches, good access)") +__success __retval(1) +__naked void access_test13_branches_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r4 = 1; \ + if r3 > r4 goto l1_%=; \ + r3 = 14; \ + goto l2_%=; \ +l1_%=: r3 = 24; \ +l2_%=: r5 = r3; \ + r5 += 23; \ + r5 &= 15; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)") +__success __retval(1) +__naked void _0_const_imm_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r5 = 12; \ + r5 >>= 4; \ + r6 = r2; \ + r6 += r5; \ + r0 = *(u8*)(r6 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test15 (spill with xadd)") +__failure __msg("R2 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void access_test15_spill_with_xadd(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r5 = 4096; \ + r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r2; \ + lock *(u64 *)(r4 + 0) += r5; \ + r2 = *(u64*)(r4 + 0); \ + *(u32*)(r2 + 0) = r5; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test16 (arith on data_end)") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void test16_arith_on_data_end(void) +{ + asm 
volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + r3 += 16; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test17 (pruning, alignment)") +__failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4") +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void packet_access_test17_pruning_alignment(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r0 = r2; \ + r0 += 14; \ + if r7 > 1 goto l0_%=; \ +l2_%=: if r0 > r3 goto l1_%=; \ + *(u32*)(r0 - 4) = r0; \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: r0 += 1; \ + goto l2_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test18 (imm += pkt_ptr, 1)") +__success __retval(0) +__naked void test18_imm_pkt_ptr_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 8; \ + r0 += r2; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test19 (imm += pkt_ptr, 2)") +__success __retval(0) +__naked void test19_imm_pkt_ptr_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r4 = 4; \ + r4 += r2; \ + *(u8*)(r4 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test20 (x += pkt_ptr, 1)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test20_x_pkt_ptr_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 0xffffffff; \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0x7fff; \ + r4 = r0; \ + r4 += r2; \ + r5 = r4; \ + r4 += %[__imm_0]; \ + if r4 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test21 (x += pkt_ptr, 2)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test21_x_pkt_ptr_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r4 = 0xffffffff; \ + *(u64*)(r10 - 8) = r4; \ + r4 = *(u64*)(r10 - 8); \ + r4 &= 0x7fff; \ + r4 += r2; \ + r5 = r4; \ + r4 += %[__imm_0]; \ + if r4 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" 
: + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test22 (x += pkt_ptr, 3)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test22_x_pkt_ptr_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + *(u64*)(r10 - 8) = r2; \ + *(u64*)(r10 - 16) = r3; \ + r3 = *(u64*)(r10 - 16); \ + if r0 > r3 goto l0_%=; \ + r2 = *(u64*)(r10 - 8); \ + r4 = 0xffffffff; \ + lock *(u64 *)(r10 - 8) += r4; \ + r4 = *(u64*)(r10 - 8); \ + r4 >>= 49; \ + r4 += r2; \ + r0 = r4; \ + r0 += 2; \ + if r0 > r3 goto l0_%=; \ + r2 = 1; \ + *(u16*)(r4 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test23 (x += pkt_ptr, 4)") +__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void test23_x_pkt_ptr_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0xffff; \ + r4 = r0; \ + r0 = 31; \ + r0 += r4; \ + r0 += r2; \ + r5 = r0; \ + r0 += %[__imm_0]; \ + if r0 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test24 (x += pkt_ptr, 5)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test24_x_pkt_ptr_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 0xffffffff; \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0xff; \ + r4 = r0; \ + r0 = 64; \ + r0 += r4; \ + r0 += r2; \ + r5 = r0; \ + r0 += %[__imm_0]; \ + if r0 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test25 (marking on <, good access)") +__success __retval(0) +__naked void test25_marking_on_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 < r3 goto l0_%=; \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test26 (marking on <, bad access)") +__failure __msg("invalid access to packet") +__naked void test26_marking_on_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ 
+ if r0 < r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test27 (marking on <=, good access)") +__success __retval(1) +__naked void test27_marking_on_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 <= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test28 (marking on <=, bad access)") +__failure __msg("invalid access to packet") +__naked void test28_marking_on_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 <= r0 goto l0_%=; \ +l1_%=: r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test29 (reg > pkt_end in subprog)") +__success __retval(0) +__naked void reg_pkt_end_in_subprog(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r2 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = r6; \ + r3 += 8; \ + call reg_pkt_end_in_subprog__1; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u8*)(r6 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void reg_pkt_end_in_subprog__1(void) +{ + asm volatile (" \ + r0 = 0; \ + if r3 > r2 goto l0_%=; \ + r0 = 1; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("direct packet access: test30 (check_id() in regsafe(), bad access)") +__failure __msg("invalid access to packet, off=0 size=1, R2") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void id_in_regsafe_bad_access(void) +{ + asm volatile (" \ + /* r9 = ctx */ \ + r9 = r1; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* r2 = ctx->data \ + * r3 = ctx->data \ + * r4 = ctx->data_end \ + */ \ + r2 = *(u32*)(r9 + %[__sk_buff_data]); \ + r3 = *(u32*)(r9 + %[__sk_buff_data]); \ + r4 = *(u32*)(r9 + %[__sk_buff_data_end]); \ + /* if r6 > 100 goto exit \ + * if r7 > 100 goto exit \ + */ \ + if r6 > 100 goto l0_%=; \ + if r7 > 100 goto l0_%=; \ + /* r2 += r6 ; this forces assignment of ID to r2\ + * r2 += 1 ; get some fixed off for r2\ + * r3 += r7 ; this forces assignment of ID to r3\ + * r3 += 1 ; get some fixed off for r3\ + */ \ + r2 += r6; \ + r2 += 1; \ + r3 += r7; \ + r3 += 1; \ + /* if r6 > r7 goto +1 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * r2 = r3 ; optionally share ID between r2 and r3\ + */ \ + if r6 != r7 goto l1_%=; \ + r2 = r3; \ +l1_%=: /* if r3 > ctx->data_end goto exit */ \ + if r3 > r4 goto l0_%=; \ + /* r5 = *(u8 *) (r2 - 
1) ; access packet memory using r2,\ + * ; this is not always safe\ + */ \ + r5 = *(u8*)(r2 - 1); \ +l0_%=: /* exit(0) */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c new file mode 100644 index 000000000000..bf16b00502f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_xskmap SEC(".maps"); + +/* This is equivalent to the following program: + * + * r6 = skb->sk; + * r7 = sk_fullsock(r6); + * r0 = sk_fullsock(r6); + * if (r0 == 0) return 0; (a) + * if (r0 != r7) return 0; (b) + * *r7->type; (c) + * return 0; + * + * It is safe to dereference r7 at point (c), because of (a) and (b). + * The test verifies that relation r0 == r7 is propagated from (b) to (c). + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch") +__success __failure_unpriv __msg_unpriv("R7 pointer comparison") +__retval(0) +__naked void socket_for_jne_false_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 == r7) r0 = *(r7->type); */ \ + if r0 != r7 goto l0_%=; /* Use ! JNE ! */\ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as above, but verify that another branch of JNE still + * prohibits access to PTR_MAYBE_NULL. + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch") +__failure __msg("R7 invalid mem access 'sock_or_null'") +__failure_unpriv __msg_unpriv("R7 pointer comparison") +__naked void unchanged_for_jne_true_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 != 0 goto l0_%=; \ + /* if (r0 == r7) return 0; */ \ + if r0 != r7 goto l1_%=; /* Use ! JNE ! 
*/\ + goto l0_%=; \ +l1_%=: /* r0 = *(r7->type); */ \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as a first test, but not null should be inferred for JEQ branch */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch") +__success __failure_unpriv __msg_unpriv("R7 pointer comparison") +__retval(0) +__naked void socket_for_jeq_true_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == null) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 != r7) return 0; */ \ + if r0 == r7 goto l1_%=; /* Use ! JEQ ! */\ + goto l0_%=; \ +l1_%=: /* r0 = *(r7->type); */ \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as above, but verify that another branch of JNE still + * prohibits access to PTR_MAYBE_NULL. + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch") +__failure __msg("R7 invalid mem access 'sock_or_null'") +__failure_unpriv __msg_unpriv("R7 pointer comparison") +__naked void unchanged_for_jeq_false_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == null) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 != r7) r0 = *(r7->type); */ \ + if r0 == r7 goto l0_%=; /* Use ! JEQ ! */\ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Maps are treated in a different branch of `mark_ptr_not_null_reg`, + * so separate test for maps case. 
+ */ +SEC("xdp") +__description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE") +__success __retval(0) +__naked void null_ptr_to_map_value(void) +{ + asm volatile (" \ + /* r9 = &some stack to use as key */ \ + r1 = 0; \ + *(u32*)(r10 - 8) = r1; \ + r9 = r10; \ + r9 += -8; \ + /* r8 = process local map */ \ + r8 = %[map_xskmap] ll; \ + /* r6 = map_lookup_elem(r8, r9); */ \ + r1 = r8; \ + r2 = r9; \ + call %[bpf_map_lookup_elem]; \ + r6 = r0; \ + /* r7 = map_lookup_elem(r8, r9); */ \ + r1 = r8; \ + r2 = r9; \ + call %[bpf_map_lookup_elem]; \ + r7 = r0; \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* if (r6 != r7) return 0; */ \ + if r6 != r7 goto l0_%=; \ + /* read *r7; */ \ + r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_xskmap), + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c new file mode 100644 index 000000000000..5bc86af80a9a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/loops1.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("xdp") +__description("bounded loop, count to 4") +__success __retval(4) +__naked void bounded_loop_count_to_4(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 1; \ + if r0 < 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count to 20") +__success +__naked void bounded_loop_count_to_20(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 3; \ + if r0 < 20 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count from positive unknown to 4") +__success +__naked void from_positive_unknown_to_4(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + if r0 s< 0 goto l0_%=; \ +l1_%=: r0 += 1; \ + if r0 < 4 goto l1_%=; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count from totally unknown to 4") +__success +__naked void from_totally_unknown_to_4(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ +l0_%=: r0 += 1; \ + if r0 < 4 goto l0_%=; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count to 4 with equality") +__success +__naked void count_to_4_with_equality(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 1; \ + if r0 != 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, start in the middle") +__failure __msg("back-edge") +__naked void loop_start_in_the_middle(void) +{ + asm volatile (" \ + r0 = 0; \ + goto l0_%=; \ +l1_%=: r0 += 1; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("bounded loop containing a forward jump") +__success __retval(4) +__naked void loop_containing_a_forward_jump(void) +{ + asm volatile (" \ + r0 = 0; \ +l1_%=: r0 += 1; \ + if r0 == r0 goto l0_%=; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop that jumps out rather than in") +__success +__naked void 
jumps_out_rather_than_in(void) +{ + asm volatile (" \ + r6 = 0; \ +l1_%=: r6 += 1; \ + if r6 > 10000 goto l0_%=; \ + call %[bpf_get_prandom_u32]; \ + goto l1_%=; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop after a conditional jump") +__failure __msg("program is too large") +__naked void loop_after_a_conditional_jump(void) +{ + asm volatile (" \ + r0 = 5; \ + if r0 < 4 goto l0_%=; \ +l1_%=: r0 += 1; \ + goto l1_%=; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded recursion") +__failure __msg("back-edge") +__naked void bounded_recursion(void) +{ + asm volatile (" \ + r1 = 0; \ + call bounded_recursion__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void bounded_recursion__1(void) +{ + asm volatile (" \ + r1 += 1; \ + r0 = r1; \ + if r1 < 4 goto l0_%=; \ + exit; \ +l0_%=: call bounded_recursion__1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop in two jumps") +__failure __msg("loop detected") +__naked void infinite_loop_in_two_jumps(void) +{ + asm volatile (" \ + r0 = 0; \ +l1_%=: goto l0_%=; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop: three-jump trick") +__failure __msg("loop detected") +__naked void infinite_loop_three_jump_trick(void) +{ + asm volatile (" \ + r0 = 0; \ +l2_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l0_%=; \ + exit; \ +l0_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l1_%=; \ + exit; \ +l1_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l2_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("not-taken loop with back jump to 1st insn") +__success __retval(123) +__naked void back_jump_to_1st_insn_1(void) +{ + asm volatile (" \ +l0_%=: r0 = 123; \ + if r0 == 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("taken loop with back jump to 1st insn") +__success __retval(55) +__naked void back_jump_to_1st_insn_2(void) +{ + asm volatile (" \ + r1 = 10; \ + r2 = 0; \ + call back_jump_to_1st_insn_2__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void back_jump_to_1st_insn_2__1(void) +{ + asm volatile (" \ +l0_%=: r2 += r1; \ + r1 -= 1; \ + if r1 != 0 goto l0_%=; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("taken loop with back jump to 1st insn, 2") +__success __retval(55) +__naked void jump_to_1st_insn_2(void) +{ + asm volatile (" \ + r1 = 10; \ + r2 = 0; \ + call jump_to_1st_insn_2__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void jump_to_1st_insn_2__1(void) +{ + asm volatile (" \ +l0_%=: r2 += r1; \ + r1 -= 1; \ + if w1 != 0 goto l0_%=; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_lwt.c b/tools/testing/selftests/bpf/progs/verifier_lwt.c new file mode 100644 index 000000000000..5ab746307309 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_lwt.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("lwt_in") +__description("invalid direct packet write for LWT_IN") +__failure __msg("cannot write into packet") +__naked void packet_write_for_lwt_in(void) +{ + 
asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_out") +__description("invalid direct packet write for LWT_OUT") +__failure __msg("cannot write into packet") +__naked void packet_write_for_lwt_out(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("direct packet write for LWT_XMIT") +__success __retval(0) +__naked void packet_write_for_lwt_xmit(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_in") +__description("direct packet read for LWT_IN") +__success __retval(0) +__naked void packet_read_for_lwt_in(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_out") +__description("direct packet read for LWT_OUT") +__success __retval(0) +__naked void packet_read_for_lwt_out(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("direct packet read for LWT_XMIT") +__success __retval(0) +__naked void packet_read_for_lwt_xmit(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("overlapping checks for direct packet access") +__success __retval(0) +__naked void checks_for_direct_packet_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r2; \ + r1 += 6; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u16*)(r2 + 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct 
__sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("make headroom for LWT_XMIT") +__success __retval(0) +__naked void make_headroom_for_lwt_xmit(void) +{ + asm volatile (" \ + r6 = r1; \ + r2 = 34; \ + r3 = 0; \ + call %[bpf_skb_change_head]; \ + /* split for s390 to succeed */ \ + r1 = r6; \ + r2 = 42; \ + r3 = 0; \ + call %[bpf_skb_change_head]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_change_head) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_IN") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_in(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_OUT") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_out(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_XMIT") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_xmit(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("lwt_in") +__description("check skb->tc_classid half load not permitted for lwt prog") +__failure __msg("invalid bpf_context access") +__naked void not_permitted_for_lwt_prog(void) +{ + asm volatile ( + "r0 = 0;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);" +#else + "r0 = *(u16*)(r1 + %[__imm_0]);" +#endif + "exit;" + : + : __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2), + __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c new file mode 100644 index 000000000000..4eaab1468eb7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_in_map.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + }); +} map_in_map SEC(".maps"); + +SEC("socket") +__description("map in map access") +__success __success_unpriv __retval(0) +__naked void map_in_map_access(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("xdp") +__description("map in map state pruning") +__success __msg("processed 26 insns") 
+__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void map_in_map_state_pruning(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r6 = r10; \ + r6 += -4; \ + r2 = r6; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r2 = r6; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + r2 = r6; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l2_%=; \ + exit; \ +l2_%=: r2 = r6; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + 0); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("invalid inner map pointer") +__failure __msg("R1 pointer arithmetic on map_ptr prohibited") +__failure_unpriv +__naked void invalid_inner_map_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + r1 += 8; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("forgot null checking on the inner map pointer") +__failure __msg("R1 type=map_value_or_null expected=map_ptr") +__failure_unpriv +__naked void on_the_inner_map_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c new file mode 100644 index 000000000000..c5a7c1ddc562 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + }); +} map_in_map SEC(".maps"); + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); +void dummy_prog_loop2_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void 
*)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps") = { + .values = { + [1] = (void *)&dummy_prog_loop2_socket, + [2] = (void *)&dummy_prog_24_socket, + [7] = (void *)&dummy_prog_42_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop2_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog2_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +SEC("tc") +__description("calls: two calls returning different map pointers for lookup (hash, array)") +__success __retval(1) +__naked void pointers_for_lookup_hash_array(void) +{ + asm volatile (" \ + /* main prog */ \ + if r1 != 0 goto l0_%=; \ + call pointers_for_lookup_hash_array__1; \ + goto l1_%=; \ +l0_%=: call pointers_for_lookup_hash_array__2; \ +l1_%=: r1 = r0; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + r0 = 1; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void pointers_for_lookup_hash_array__1(void) +{ + asm volatile (" \ + r0 = %[map_hash_48b] ll; \ + exit; \ +" : + : __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void pointers_for_lookup_hash_array__2(void) +{ + asm volatile (" \ + r0 = %[map_array_48b] ll; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("tc") +__description("calls: two calls returning different map pointers for lookup (hash, map in map)") +__failure __msg("only read from bpf_array is supported") +__naked void lookup_hash_map_in_map(void) +{ + asm volatile (" \ + /* main prog */ \ + if r1 != 0 goto l0_%=; \ + call lookup_hash_map_in_map__1; \ + goto l1_%=; \ +l0_%=: call lookup_hash_map_in_map__2; \ +l1_%=: r1 = r0; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + r0 = 1; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lookup_hash_map_in_map__1(void) +{ + asm volatile (" \ + r0 = %[map_array_48b] ll; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lookup_hash_map_in_map__2(void) +{ + asm volatile (" \ + r0 = %[map_in_map] ll; \ + exit; \ +" : + : __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") 
+__description("cond: two branches returning different map pointers for lookup (tail, tail)") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(42) +__naked void pointers_for_lookup_tail_tail_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + if r6 != 0 goto l0_%=; \ + r2 = %[map_prog2_socket] ll; \ + goto l1_%=; \ +l0_%=: r2 = %[map_prog1_socket] ll; \ +l1_%=: r3 = 7; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("cond: two branches returning same map pointers for lookup (tail, tail)") +__success __success_unpriv __retval(42) +__naked void pointers_for_lookup_tail_tail_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + if r6 == 0 goto l0_%=; \ + r2 = %[map_prog2_socket] ll; \ + goto l1_%=; \ +l0_%=: r2 = %[map_prog2_socket] ll; \ +l1_%=: r3 = 7; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c new file mode 100644 index 000000000000..65bba330e7e5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" + +#include "bpf_misc.h" + +#include <bpf/bpf_endian.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +SEC("netfilter") +__description("netfilter invalid context access, size too short") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test1(void) +{ + asm volatile (" \ + r2 = *(u8*)(r1 + %[__bpf_nf_ctx_state]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_state, offsetof(struct bpf_nf_ctx, state)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context access, size too short") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test2(void) +{ + asm volatile (" \ + r2 = *(u16*)(r1 + %[__bpf_nf_ctx_skb]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context access, past end of ctx") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test3(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[__bpf_nf_ctx_size]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_size, sizeof(struct bpf_nf_ctx)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context, write") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test4(void) +{ + asm volatile (" \ + r2 = r1; \ + *(u64*)(r2 + 0) = r1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb)) + : __clobber_all); +} + +#define NF_DROP 0 +#define NF_ACCEPT 1 + +SEC("netfilter") +__description("netfilter valid context read and invalid write") +__failure __msg("only read is supported") +int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx) +{ + struct nf_hook_state *state = (void *)ctx->state; + + state->sk = NULL; + return NF_ACCEPT; +} + +extern 
int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, + struct bpf_dynptr *ptr__uninit) __ksym; +extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, + void *buffer, uint32_t buffer__sz) __ksym; + +SEC("netfilter") +__description("netfilter test prog with skb and state read access") +__success __failure_unpriv +__retval(0) +int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) +{ + const struct nf_hook_state *state = ctx->state; + struct sk_buff *skb = ctx->skb; + const struct iphdr *iph; + const struct tcphdr *th; + u8 buffer_iph[20] = {}; + u8 buffer_th[40] = {}; + struct bpf_dynptr ptr; + uint8_t ihl; + + if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) + return NF_ACCEPT; + + iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph)); + if (!iph) + return NF_ACCEPT; + + if (state->pf != 2) + return NF_ACCEPT; + + ihl = iph->ihl << 2; + + th = bpf_dynptr_slice(&ptr, ihl, buffer_th, sizeof(buffer_th)); + if (!th) + return NF_ACCEPT; + + return th->dest == bpf_htons(22) ? NF_ACCEPT : NF_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c new file mode 100644 index 000000000000..353ae6da00e1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("netfilter") +__description("bpf_exit with invalid return code. test1") +__failure __msg("R0 is not a known value") +__naked void with_invalid_return_code_test1(void) +{ + asm volatile (" \ + r0 = *(u64*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with valid return code. test2") +__success +__naked void with_valid_return_code_test2(void) +{ + asm volatile (" \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with valid return code. test3") +__success +__naked void with_valid_return_code_test3(void) +{ + asm volatile (" \ + r0 = 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with invalid return code. 
test4") +__failure __msg("R0 has value (0x2; 0x0)") +__naked void with_invalid_return_code_test4(void) +{ + asm volatile (" \ + r0 = 2; \ + exit; \ +" ::: __clobber_all); +} diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c new file mode 100644 index 000000000000..c4c6da21265e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#define BPF_SK_LOOKUP(func) \ + /* struct bpf_sock_tuple tuple = {} */ \ + "r2 = 0;" \ + "*(u32*)(r10 - 8) = r2;" \ + "*(u64*)(r10 - 16) = r2;" \ + "*(u64*)(r10 - 24) = r2;" \ + "*(u64*)(r10 - 32) = r2;" \ + "*(u64*)(r10 - 40) = r2;" \ + "*(u64*)(r10 - 48) = r2;" \ + /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ + "r2 = r10;" \ + "r2 += -48;" \ + "r3 = %[sizeof_bpf_sock_tuple];"\ + "r4 = 0;" \ + "r5 = 0;" \ + "call %[" #func "];" + +struct bpf_key {} __attribute__((preserve_access_index)); + +extern void bpf_key_put(struct bpf_key *key) __ksym; +extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. + */ +void __kfunc_btf_root(void) +{ + bpf_key_put(0); + bpf_lookup_system_key(0); + bpf_lookup_user_key(0, 0); +} + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +void dummy_prog_42_tc(void); +void dummy_prog_24_tc(void); +void dummy_prog_loop1_tc(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_tc SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_tc, + [1] = (void *)&dummy_prog_loop1_tc, + [2] = (void *)&dummy_prog_24_tc, + }, +}; + +SEC("tc") +__auxiliary +__naked void dummy_prog_42_tc(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("tc") +__auxiliary +__naked void dummy_prog_24_tc(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("tc") +__auxiliary +__naked void dummy_prog_loop1_tc(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_tc] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_tc) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference") +__failure __msg("Unreleased reference") +__naked void reference_tracking_leak_potential_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference to sock_common") +__failure __msg("Unreleased reference") +__naked void 
potential_reference_to_sock_common_1(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r6 = r0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference on stack") +__failure __msg("Unreleased reference") +__naked void leak_potential_reference_on_stack(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference on stack 2") +__failure __msg("Unreleased reference") +__naked void potential_reference_on_stack_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r0; \ + r0 = 0; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: zero potential reference") +__failure __msg("Unreleased reference") +__naked void reference_tracking_zero_potential_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r0 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: zero potential reference to sock_common") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_sock_common_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r0 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: copy and zero potential references") +__failure __msg("Unreleased reference") +__naked void copy_and_zero_potential_references(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r7 = r0; \ + r0 = 0; \ + r7 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: acquire/release user key reference") +__success +__naked void acquire_release_user_key_reference(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_key_put]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: acquire/release system key reference") +__success +__naked void acquire_release_system_key_reference(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_key_put]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release user key reference without check") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void user_key_reference_without_check(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call 
%[bpf_lookup_user_key]; \ + r1 = r0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release system key reference without check") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void system_key_reference_without_check(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + r1 = r0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release with NULL key pointer") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void release_with_null_key_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: leak potential reference to user key") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_user_key(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + exit; \ +" : + : __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: leak potential reference to system key") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_system_key(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + exit; \ +" : + : __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference without check") +__failure __msg("type=sock_or_null expected=sock") +__naked void tracking_release_reference_without_check(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* reference in r0 may be NULL */ \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference to sock_common without check") +__failure __msg("type=sock_common_or_null expected=sock") +__naked void to_sock_common_without_check(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" /* reference in r0 may be NULL */ \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference") +__success __retval(0) +__naked void reference_tracking_release_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference to sock_common") +__success __retval(0) +__naked void release_reference_to_sock_common(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") 
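
Editor's note: the reference-tracking tests above encode, in raw asm, the acquire/NULL-check/release contract for bpf_sk_lookup_tcp(). A hedged C-level sketch of the pattern the verifier accepts (illustration only, not part of this patch; assumes the usual <linux/bpf.h> and <bpf/bpf_helpers.h> includes):

	/* Illustration: acquire, check for NULL, release on every path. */
	SEC("tc")
	int sk_lookup_release_sketch(struct __sk_buff *skb)
	{
		struct bpf_sock_tuple tuple = {};
		struct bpf_sock *sk;

		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
		if (sk)				/* the reference may be NULL */
			bpf_sk_release(sk);	/* must be released before exit */
		return 0;
	}

Leaking the pointer, releasing it without the NULL check, or releasing it twice are exactly the failure cases the surrounding tests expect the verifier to reject.
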
+__description("reference tracking: release reference 2") +__success __retval(0) +__naked void reference_tracking_release_reference_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference twice") +__failure __msg("type=scalar expected=sock") +__naked void reference_tracking_release_reference_twice(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference twice inside branch") +__failure __msg("type=scalar expected=sock") +__naked void release_reference_twice_inside_branch(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; /* goto end */ \ + call %[bpf_sk_release]; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: alloc, check, free in one subbranch") +__failure __msg("Unreleased reference") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void check_free_in_one_subbranch(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 16; \ + /* if (offsetof(skb, mark) > data_len) exit; */ \ + if r0 <= r3 goto l0_%=; \ + exit; \ +l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r6 == 0 goto l1_%=; /* mark == 0? */\ + /* Leak reference in R0 */ \ + exit; \ +l1_%=: if r0 == 0 goto l2_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l2_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: alloc, check, free in both subbranches") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void check_free_in_both_subbranches(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 16; \ + /* if (offsetof(skb, mark) > data_len) exit; */ \ + if r0 <= r3 goto l0_%=; \ + exit; \ +l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r6 == 0 goto l1_%=; /* mark == 0? */\ + if r0 == 0 goto l2_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l2_%=: exit; \ +l1_%=: if r0 == 0 goto l3_%=; /* sk NULL? 
*/ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l3_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: free reference in subprog") +__success __retval(0) +__naked void call_free_reference_in_subprog(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; /* unchecked reference */ \ + call call_free_reference_in_subprog__1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void call_free_reference_in_subprog__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r2 = r1; \ + if r2 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: free reference in subprog and outside") +__failure __msg("type=scalar expected=sock") +__naked void reference_in_subprog_and_outside(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; /* unchecked reference */ \ + r6 = r0; \ + call reference_in_subprog_and_outside__1; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void reference_in_subprog_and_outside__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r2 = r1; \ + if r2 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: alloc & leak reference in subprog") +__failure __msg("Unreleased reference") +__naked void alloc_leak_reference_in_subprog(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call alloc_leak_reference_in_subprog__1; \ + r1 = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void alloc_leak_reference_in_subprog__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r6 = r4; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* spill unchecked sk_ptr into stack of caller */\ + *(u64*)(r6 + 0) = r0; \ + r1 = r0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: alloc in subprog, release outside") +__success __retval(POINTER_VALUE) +__naked void alloc_in_subprog_release_outside(void) +{ + asm volatile (" \ + r4 = r10; \ + call alloc_in_subprog_release_outside__1; \ + r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void alloc_in_subprog_release_outside__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; /* return sk */ \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: sk_ptr leak into caller 
stack") +__failure __msg("Unreleased reference") +__naked void ptr_leak_into_caller_stack(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call ptr_leak_into_caller_stack__1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_leak_into_caller_stack__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r5 = r10; \ + r5 += -8; \ + *(u64*)(r5 + 0) = r4; \ + call ptr_leak_into_caller_stack__2; \ + /* spill unchecked sk_ptr into stack of caller */\ + r5 = r10; \ + r5 += -8; \ + r4 = *(u64*)(r5 + 0); \ + *(u64*)(r4 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_leak_into_caller_stack__2(void) +{ + asm volatile (" \ + /* subprog 2 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: sk_ptr spill into caller stack") +__success __retval(0) +__naked void ptr_spill_into_caller_stack(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call ptr_spill_into_caller_stack__1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_spill_into_caller_stack__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r5 = r10; \ + r5 += -8; \ + *(u64*)(r5 + 0) = r4; \ + call ptr_spill_into_caller_stack__2; \ + /* spill unchecked sk_ptr into stack of caller */\ + r5 = r10; \ + r5 += -8; \ + r4 = *(u64*)(r5 + 0); \ + *(u64*)(r4 + 0) = r0; \ + if r0 == 0 goto l0_%=; \ + /* now the sk_ptr is verified, free the reference */\ + r1 = *(u64*)(r4 + 0); \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_spill_into_caller_stack__2(void) +{ + asm volatile (" \ + /* subprog 2 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: allow LD_ABS") +__success __retval(0) +__naked void reference_tracking_allow_ld_abs(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r0 = *(u8*)skb[0]; \ + r0 = *(u16*)skb[0]; \ + r0 = *(u32*)skb[0]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: forbid LD_ABS while holding reference") +__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__naked void ld_abs_while_holding_reference(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r0 = *(u8*)skb[0]; \ + r0 = *(u16*)skb[0]; \ + r0 = *(u32*)skb[0]; \ + r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: allow LD_IND") +__success __retval(1) +__naked void reference_tracking_allow_ld_ind(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r7 = 1; \ + .8byte 
%[ld_ind]; \ + r0 = r7; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)), + __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: forbid LD_IND while holding reference") +__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__naked void ld_ind_while_holding_reference(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r0; \ + r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + r1 = r4; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)), + __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: check reference or tail call") +__success __retval(0) +__naked void check_reference_or_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* if (sk) bpf_sk_release() */ \ + r1 = r0; \ + if r1 != 0 goto l0_%=; \ + /* bpf_tail_call() */ \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference then tail call") +__success __retval(0) +__naked void release_reference_then_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* if (sk) bpf_sk_release() */ \ + r1 = r0; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: /* bpf_tail_call() */ \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak possible reference over tail call") +__failure __msg("tail_call would lead to reference leak") +__naked void possible_reference_over_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ + /* Look up socket and store in REG_6 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* bpf_tail_call() */ \ + r6 = r0; \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + /* if (sk) bpf_sk_release() */ \ + r1 = r6; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak checked reference over tail call") +__failure __msg("tail_call would lead to reference leak") +__naked void checked_reference_over_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ + /* Look up socket and store in REG_6 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + /* if (!sk) goto end */ \ + if r0 == 0 goto l0_%=; \ + /* bpf_tail_call() */ \ + r3 = 0; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + r1 = r6; \ +l0_%=: call 
%[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: mangle and release sock_or_null") +__failure __msg("R1 pointer arithmetic on sock_or_null prohibited") +__naked void and_release_sock_or_null(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r1 += 5; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: mangle and release sock") +__failure __msg("R1 pointer arithmetic on sock prohibited") +__naked void tracking_mangle_and_release_sock(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 += 5; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: access member") +__success __retval(0) +__naked void reference_tracking_access_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u32*)(r0 + 4); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: write to member") +__failure __msg("cannot write into sock") +__naked void reference_tracking_write_to_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 = r6; \ + r2 = 42 ll; \ + *(u32*)(r1 + %[bpf_sock_mark]) = r2; \ + r1 = r6; \ +l0_%=: call %[bpf_sk_release]; \ + r0 = 0 ll; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: invalid 64-bit access of member") +__failure __msg("invalid sock access off=0 size=8") +__naked void _64_bit_access_of_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: access after release") +__failure __msg("!read_ok") +__naked void reference_tracking_access_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ + r2 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: direct access for lookup") +__success __retval(0) +__naked void tracking_direct_access_for_lookup(void) +{ + asm volatile (" \ + /* Check that the packet is at least 64B long */\ + r2 = *(u32*)(r1 + 
%[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 64; \ + if r0 > r3 goto l0_%=; \ + /* sk = sk_lookup_tcp(ctx, skb->data, ...) */ \ + r3 = %[sizeof_bpf_sock_tuple]; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_sk_lookup_tcp]; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u32*)(r0 + 4); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_tcp_sock() after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bpf_tcp_sock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_sk_fullsock() after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bpf_sk_fullsock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void sk_fullsock_tp_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_fullsock]; \ + r1 = r6; \ + r6 = r0; \ + call %[bpf_sk_release]; \ + if r6 != 0 goto l2_%=; \ + exit; \ +l2_%=: r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use sk after bpf_sk_release(tp)") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void after_bpf_sk_release_tp(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; 
\ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)") +__success __retval(0) +__naked void after_bpf_sk_release_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_get_listener_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r6; \ + r6 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_src_port]); \ + exit; \ +" : + : __imm(bpf_get_listener_sock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: bpf_sk_release(listen_sk)") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_release_listen_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_get_listener_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_get_listener_sock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */ +SEC("tc") +__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)") +__failure __msg("invalid mem access") +__naked void and_bpf_tcp_sock_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + r1 = r6; \ + call %[bpf_tcp_sock]; \ + r8 = r0; \ + if r7 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]); \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: branch tracking valid pointer null comparison") +__success __retval(0) +__naked void tracking_valid_pointer_null_comparison(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + r3 = 1; \ + if r6 != 0 goto l0_%=; \ + r3 = 0; \ +l0_%=: if r6 == 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l1_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: branch tracking valid 
pointer value comparison") +__failure __msg("Unreleased reference") +__naked void tracking_valid_pointer_value_comparison(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + r3 = 1; \ + if r6 == 0 goto l0_%=; \ + r3 = 0; \ + if r6 == 1234 goto l0_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: bpf_sk_release(btf_tcp_sock)") +__success +__retval(0) +__naked void sk_release_btf_tcp_sock(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_skc_to_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_skc_to_tcp_sock), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release") +__failure __msg("invalid mem access") +__naked void to_tcp_sock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_skc_to_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u8*)(r7 + 0); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_skc_to_tcp_sock), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("socket") +__description("reference tracking: try to leak released ptr reg") +__success __failure_unpriv __msg_unpriv("R8 !read_ok") +__retval(0) +__naked void to_leak_released_ptr_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u32*)(r10 - 4) = r0; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r9 = r0; \ + r0 = 0; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r8 = r0; \ + r1 = r8; \ + r2 = 0; \ + call %[bpf_ringbuf_discard]; \ + r0 = 0; \ + *(u64*)(r9 + 0) = r8; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_ringbuf_discard), + __imm(bpf_ringbuf_reserve), + __imm_addr(map_array_48b), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_reg_equal.c b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c new file mode 100644 index 000000000000..dc1d8c30fb0e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("check w reg equal if r reg upper32 bits 0") +__success +__naked void subreg_equality_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64 *)(r10 - 8) = r0; \ + r2 = *(u32 *)(r10 - 8); \ + /* At this point upper 4-bytes of r2 are 0, \ + * thus insn w3 = w2 should propagate reg id, \ + * and w2 < 9 comparison would also propagate \ + * the range for r3. 
\ + */ \ + w3 = w2; \ + if w2 < 9 goto l0_%=; \ + exit; \ +l0_%=: if r3 < 9 goto l1_%=; \ + /* r1 read is illegal at this point */ \ + r0 -= r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check w reg not equal if r reg upper32 bits not 0") +__failure __msg("R1 !read_ok") +__naked void subreg_equality_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + r2 = r0; \ + /* Upper 4-bytes of r2 may not be 0, thus insn \ + * w3 = w2 should not propagate reg id, and \ + * w2 < 9 comparison should not propagate \ + * the range for r3 either. \ + */ \ + w3 = w2; \ + if w2 < 9 goto l0_%=; \ + exit; \ +l0_%=: if r3 < 9 goto l1_%=; \ + /* r1 read is illegal at this point */ \ + r0 -= r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_regalloc.c b/tools/testing/selftests/bpf/progs/verifier_regalloc.c new file mode 100644 index 000000000000..ee5ddea87c91 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_regalloc.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/regalloc.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("tracepoint") +__description("regalloc basic") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_basic(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc negative") +__failure __msg("invalid access to map value, value_size=48 off=48 size=1") +__naked void regalloc_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 24 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u8*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc src_reg mark") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_src_reg_mark(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + r3 = 0; \ + if r3 s>= r2 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") 
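
Editor's note: the regalloc tests above all follow one shape: copy a random value into two registers, establish signed bounds on one copy, and rely on the verifier carrying those bounds to the other copy before both are added to the 48-byte map value pointer. A rough C equivalent of the access pattern (illustration only; it reuses the map and struct declared in this file, and the function name is hypothetical):

	/* Illustration of the bounded map-value access these tests encode in asm. */
	SEC("tracepoint")
	int regalloc_c_sketch(void *ctx)
	{
		long long key = 0;
		struct test_val *val;
		long a, b;

		val = bpf_map_lookup_elem(&map_hash_48b, &key);
		if (!val)
			return 0;

		a = bpf_get_prandom_u32();
		b = a;
		if (a > 20 || b < 0)	/* bounds must hold for both copies */
			return 0;
		/* 20 + 20 is still inside the 48-byte value, so the load is legal */
		return *(char *)((void *)val + a + b);
	}
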
+__description("regalloc src_reg negative") +__failure __msg("invalid access to map value, value_size=48 off=44 size=8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_src_reg_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 22 goto l0_%=; \ + r3 = 0; \ + if r3 s>= r2 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc and spill") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_and_spill(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + /* r0 has upper bound that should propagate into r2 */\ + *(u64*)(r10 - 8) = r2; /* spill r2 */ \ + r0 = 0; \ + r2 = 0; /* clear r0 and r2 */\ + r3 = *(u64*)(r10 - 8); /* fill r3 */ \ + if r0 s>= r3 goto l0_%=; \ + /* r3 has lower and upper bounds */ \ + r7 += r3; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc and spill negative") +__failure __msg("invalid access to map value, value_size=48 off=48 size=8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_and_spill_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 48 goto l0_%=; \ + /* r0 has upper bound that should propagate into r2 */\ + *(u64*)(r10 - 8) = r2; /* spill r2 */ \ + r0 = 0; \ + r2 = 0; /* clear r0 and r2 */\ + r3 = *(u64*)(r10 - 8); /* fill r3 */\ + if r0 s>= r3 goto l0_%=; \ + /* r3 has lower and upper bounds */ \ + r7 += r3; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc three regs") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_three_regs(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + r4 = r2; \ + if r0 s> 12 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r7 += r4; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc after call") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_after_call(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r8 = r0; \ + r9 = r0; \ + call 
regalloc_after_call__1; \ + if r8 s> 20 goto l0_%=; \ + if r9 s< 0 goto l0_%=; \ + r7 += r8; \ + r7 += r9; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void regalloc_after_call__1(void) +{ + asm volatile (" \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("regalloc in callee") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_in_callee(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r3 = r7; \ + call regalloc_in_callee__1; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void regalloc_in_callee__1(void) +{ + asm volatile (" \ + if r1 s> 20 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r3 += r1; \ + r3 += r2; \ + r0 = *(u64*)(r3 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("regalloc, spill, JEQ") +__success +__naked void regalloc_spill_jeq(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + *(u64*)(r10 - 8) = r0; /* spill r0 */ \ + if r0 == 0 goto l0_%=; \ +l0_%=: /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */\ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r2 == 20 goto l1_%=; \ +l1_%=: /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */\ + r3 = *(u64*)(r10 - 8); /* fill r3 with map_value */\ + if r3 == 0 goto l2_%=; /* skip ldx if map_value == NULL */\ + /* Buggy verifier will think that r3 == 20 here */\ + r0 = *(u64*)(r3 + 0); /* read from map_value */\ +l2_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c new file mode 100644 index 000000000000..27ebfc1fd9ee --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/runtime_jit.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); +void dummy_prog_loop2_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps") = { + .values = { + [1] = (void *)&dummy_prog_loop2_socket, + [2] = (void *)&dummy_prog_24_socket, + [7] = (void *)&dummy_prog_42_socket, + }, +}; + 
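For orientation, the naked-asm programs below load r2 with one of the prog arrays defined above and r3 with an index before calling the bpf_tail_call helper. The same key-0 lookup could be written in plain BPF C roughly as in this sketch (illustrative only, not part of the patch; tail_call_demo is a hypothetical name, everything else comes from the definitions above):

SEC("socket")
int tail_call_demo(struct __sk_buff *skb)
{
	/* Jump to slot 0 of map_prog1_socket; on success bpf_tail_call()
	 * does not return, so the fallthrough return value 1 is only seen
	 * when the slot is empty or the index is out of bounds.
	 */
	bpf_tail_call(skb, &map_prog1_socket, 0);
	return 1;
}

Slot 0 of map_prog1_socket holds dummy_prog_42_socket, so a successful jump ends with return value 42, which is what the first within-bounds test below asserts via __retval(42).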
+SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop2_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog2_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, prog once") +__success __success_unpriv __retval(42) +__naked void call_within_bounds_prog_once(void) +{ + asm volatile (" \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, prog loop") +__success __success_unpriv __retval(41) +__naked void call_within_bounds_prog_loop(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, no prog") +__success __success_unpriv __retval(1) +__naked void call_within_bounds_no_prog(void) +{ + asm volatile (" \ + r3 = 3; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2") +__success __success_unpriv __retval(24) +__naked void call_within_bounds_key_2(void) +{ + asm volatile (" \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2 / key 2, first branch") +__success __success_unpriv __retval(24) +__naked void _2_key_2_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2 / key 2, second branch") +__success __success_unpriv __retval(24) +__naked void _2_key_2_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + 
__imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 0 / key 2, first branch") +__success __success_unpriv __retval(24) +__naked void _0_key_2_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 0 / key 2, second branch") +__success __success_unpriv __retval(42) +__naked void _0_key_2_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, different maps, first branch") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(1) +__naked void bounds_different_maps_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 0; \ + r2 = %[map_prog2_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, different maps, second branch") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(42) +__naked void bounds_different_maps_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 0; \ + r2 = %[map_prog2_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call out of bounds") +__success __success_unpriv __retval(2) +__naked void tail_call_out_of_bounds(void) +{ + asm volatile (" \ + r3 = 256; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: pass negative index to tail_call") +__success __success_unpriv __retval(2) +__naked void negative_index_to_tail_call(void) +{ + asm volatile (" \ + r3 = -1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + 
__imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: pass > 32bit index to tail_call") +__success __success_unpriv __retval(42) +/* Verifier rewrite for unpriv skips tail call here. */ +__retval_unpriv(2) +__naked void _32bit_index_to_tail_call(void) +{ + asm volatile (" \ + r3 = 0x100000000 ll; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c new file mode 100644 index 000000000000..5a14498d352f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("pointer/scalar confusion in state equality check (way 1)") +__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") +__retval(POINTER_VALUE) +__naked void state_equality_check_way_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l0_%=: r0 = r10; \ +l1_%=: goto l2_%=; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("pointer/scalar confusion in state equality check (way 2)") +__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") +__retval(POINTER_VALUE) +__naked void state_equality_check_way_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = r10; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r0 + 0); \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("liveness pruning and write screening") +__failure __msg("R0 !read_ok") +__naked void liveness_pruning_and_write_screening(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* branch conditions teach us nothing about R2 */\ + if r2 >= 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r2 >= 0 goto l1_%=; \ + r0 = 0; \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("varlen_map_value_access pruning") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void varlen_map_value_access_pruning(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + w2 = %[max_entries]; \ + if r2 s> r1 goto l1_%=; \ + 
w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + goto l2_%=; \ +l2_%=: r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("search pruning: all branches should be verified (nop operation)") +__failure __msg("R6 invalid mem access 'scalar'") +__naked void should_be_verified_nop_operation(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r0 + 0); \ + if r3 == 0xbeef goto l1_%=; \ + r4 = 0; \ + goto l2_%=; \ +l1_%=: r4 = 1; \ +l2_%=: *(u64*)(r10 - 16) = r4; \ + call %[bpf_ktime_get_ns]; \ + r5 = *(u64*)(r10 - 16); \ + if r5 == 0 goto l0_%=; \ + r6 = 0; \ + r1 = 0xdead; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("search pruning: all branches should be verified (invalid stack access)") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -16+0 size 8") +__retval(0) +__naked void be_verified_invalid_stack_access(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r0 + 0); \ + r4 = 0; \ + if r3 == 0xbeef goto l1_%=; \ + *(u64*)(r10 - 16) = r4; \ + goto l2_%=; \ +l1_%=: *(u64*)(r10 - 24) = r4; \ +l2_%=: call %[bpf_ktime_get_ns]; \ + r5 = *(u64*)(r10 - 16); \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("precision tracking for u32 spill/fill") +__failure __msg("R0 min value is outside of the allowed memory range") +__naked void tracking_for_u32_spill_fill(void) +{ + asm volatile (" \ + r7 = r1; \ + call %[bpf_get_prandom_u32]; \ + w6 = 32; \ + if r0 == 0 goto l0_%=; \ + w6 = 4; \ +l0_%=: /* Additional insns to introduce a pruning point. */\ + call %[bpf_get_prandom_u32]; \ + r3 = 0; \ + r3 = 0; \ + if r0 == 0 goto l1_%=; \ + r3 = 0; \ +l1_%=: /* u32 spill/fill */ \ + *(u32*)(r10 - 8) = r6; \ + r8 = *(u32*)(r10 - 8); \ + /* out-of-bound map value access for r6=32 */ \ + r1 = 0; \ + *(u64*)(r10 - 16) = r1; \ + r2 = r10; \ + r2 += -16; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r0 += r8; \ + r1 = *(u32*)(r0 + 0); \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("precision tracking for u32 spills, u64 fill") +__failure __msg("div by zero") +__naked void for_u32_spills_u64_fill(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + w7 = 0xffffffff; \ + /* Additional insns to introduce a pruning point. 
*/\ + r3 = 1; \ + r3 = 1; \ + r3 = 1; \ + r3 = 1; \ + call %[bpf_get_prandom_u32]; \ + if r0 == 0 goto l0_%=; \ + r3 = 1; \ +l0_%=: w3 /= 0; \ + /* u32 spills, u64 fill */ \ + *(u32*)(r10 - 4) = r6; \ + *(u32*)(r10 - 8) = r7; \ + r8 = *(u64*)(r10 - 8); \ + /* if r8 != X goto pc+1 r8 known in fallthrough branch */\ + if r8 != 0xffffffff goto l1_%=; \ + r3 = 1; \ +l1_%=: /* if r8 == X goto pc+1 condition always true on first\ + * traversal, so starts backtracking to mark r8 as requiring\ + * precision. r7 marked as needing precision. r6 not marked\ + * since it's not tracked. \ + */ \ + if r8 == 0xffffffff goto l2_%=; \ + /* fails if r8 correctly marked unknown after fill. */\ + w3 /= 0; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("allocated_stack") +__success __msg("processed 15 insns") +__success_unpriv __msg_unpriv("") __log_level(1) __retval(0) +__naked void allocated_stack(void) +{ + asm volatile (" \ + r6 = r1; \ + call %[bpf_get_prandom_u32]; \ + r7 = r0; \ + if r0 == 0 goto l0_%=; \ + r0 = 0; \ + *(u64*)(r10 - 8) = r6; \ + r6 = *(u64*)(r10 - 8); \ + *(u8*)(r10 - 9) = r7; \ + r7 = *(u8*)(r10 - 9); \ +l0_%=: if r0 != 0 goto l1_%=; \ +l1_%=: if r0 != 0 goto l2_%=; \ +l2_%=: if r0 != 0 goto l3_%=; \ +l3_%=: if r0 != 0 goto l4_%=; \ +l4_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* The test performs a conditional 64-bit write to a stack location + * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], + * then data is read from fp[-8]. This sequence is unsafe. + * + * The test would be mistakenly marked as safe w/o dst register parent + * preservation in verifier.c:copy_register_state() function. + * + * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the + * checkpoint state after conditional 64-bit assignment. 
+ */ + +SEC("socket") +__description("write tracking and register parent chain bug") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -8+1 size 8") +__retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void and_register_parent_chain_bug(void) +{ + asm volatile (" \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* r0 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + /* if r0 > r6 goto +1 */ \ + if r0 > r6 goto l0_%=; \ + /* *(u64 *)(r10 - 8) = 0xdeadbeef */ \ + r0 = 0xdeadbeef; \ + *(u64*)(r10 - 8) = r0; \ +l0_%=: r1 = 42; \ + *(u8*)(r10 - 8) = r1; \ + r2 = *(u64*)(r10 - 8); \ + /* exit(0) */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c new file mode 100644 index 000000000000..ee76b51005ab --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_sock.c @@ -0,0 +1,980 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/sock.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) + +struct { + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} map_reuseport_array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_sockhash SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_sockmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_xskmap SEC(".maps"); + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(max_entries, 0); + __type(key, int); + __type(value, struct val); + __uint(map_flags, BPF_F_NO_PREALLOC); +} sk_storage_map SEC(".maps"); + +SEC("cgroup/skb") +__description("skb->sk: no NULL check") +__failure __msg("invalid mem access 'sock_common_or_null'") +__failure_unpriv +__naked void skb_sk_no_null_check(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + r0 = *(u32*)(r1 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("skb->sk: sk->family [non fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_family_non_fullsock_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("skb->sk: sk->type [fullsock field]") +__failure __msg("invalid sock_common access") +__failure_unpriv +__naked void sk_sk_type_fullsock_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r0 = *(u32*)(r1 + 
%[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_sk_fullsock(skb->sk): no !skb->sk check") +__failure __msg("type=sock_common_or_null expected=sock_common") +__failure_unpriv +__naked void sk_no_skb_sk_check_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + call %[bpf_sk_fullsock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): no NULL check on ret") +__failure __msg("invalid mem access 'sock_or_null'") +__failure_unpriv +__naked void no_null_check_on_ret_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->type [fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_sk_type_fullsock_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->family [non fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_family_non_fullsock_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->state [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_state_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)") +__success __success_unpriv __retval(0) +__naked void port_word_load_backward_compatibility(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \ + r0 = 0; \ + exit; \ 
+" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [half load]") +__success __success_unpriv __retval(0) +__naked void sk_dst_port_half_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_half_load_invalid_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [byte load]") +__success __success_unpriv __retval(0) +__naked void sk_dst_port_byte_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \ + r2 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_byte_load_invalid(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_half_load_invalid_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): 
sk->dst_ip6 [load 2nd byte]") +__success __success_unpriv __retval(0) +__naked void dst_ip6_load_2nd_byte(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->type [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_type_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->protocol [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_protocol_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): beyond last field") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void skb_sk_beyond_last_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): no !skb->sk check") +__failure __msg("type=sock_common_or_null expected=sock_common") +__failure_unpriv +__naked void sk_no_skb_sk_check_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + call %[bpf_tcp_sock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): no NULL check on ret") +__failure __msg("invalid mem access 'tcp_sock_or_null'") +__failure_unpriv +__naked void no_null_check_on_ret_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("cgroup/skb") 
+__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_snd_cwnd_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): tp->bytes_acked") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_bytes_acked(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): beyond last field") +__failure __msg("invalid tcp_sock access") +__failure_unpriv +__naked void skb_sk_beyond_last_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_snd_cwnd_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l2_%=; \ + exit; \ +l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(skb->sk)") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_release_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_fullsock_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_release), + 
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(bpf_tcp_sock(skb->sk))") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_tcp_sock_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL") +__success __retval(0) +__naked void sk_null_0_value_null(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 0; \ + r3 = 0; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, 1, 1): value == 1") +__failure __msg("R3 type=scalar expected=fp") +__naked void sk_1_1_value_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 1; \ + r3 = 1; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value") +__success __retval(0) +__naked void stack_value_1_stack_value(void) +{ + asm volatile (" \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 1; \ + r3 = r10; \ + r3 += -8; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_map_lookup_elem(smap, &key)") +__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem") +__naked void map_lookup_elem_smap_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(sk_storage_map) + : __clobber_all); +} + +SEC("xdp") +__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id") +__success __retval(0) +__naked void xskmap_key_xs_queue_id(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_xskmap] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \ + r0 = 0; \ + exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm_addr(map_xskmap), + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockmap, &key)") +__failure __msg("Unreleased reference id=2 alloc_insn=6") +__naked void map_lookup_elem_sockmap_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockmap] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockhash, &key)") +__failure __msg("Unreleased reference id=2 alloc_insn=6") +__naked void map_lookup_elem_sockhash_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockhash] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_sockhash) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)") +__success +__naked void field_bpf_sk_release_sk_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockmap] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = r0; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_sk_release), + __imm_addr(map_sockmap), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)") +__success +__naked void field_bpf_sk_release_sk_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockhash] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = r0; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_sk_release), + __imm_addr(map_sockhash), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)") +__success +__naked void ctx_reuseport_array_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_reuseport_array] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_reuseport_array) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)") +__success +__naked void reuseport_ctx_sockmap_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_sockmap] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)") +__success +__naked void reuseport_ctx_sockhash_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_sockmap] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + 
__imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("tc") +__description("mark null check on return value of bpf_skc_to helpers") +__failure __msg("invalid mem access") +__naked void of_bpf_skc_to_helpers(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r6 = r1; \ + call %[bpf_skc_to_tcp_sock]; \ + r7 = r0; \ + r1 = r6; \ + call %[bpf_skc_to_tcp_request_sock]; \ + r8 = r0; \ + if r8 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r7 + 0); \ + exit; \ +" : + : __imm(bpf_skc_to_tcp_request_sock), + __imm(bpf_skc_to_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c new file mode 100644 index 000000000000..9c1aa69650f8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct val); +} map_spin_lock SEC(".maps"); + +SEC("cgroup/skb") +__description("spin_lock: test1 success") +__success __failure_unpriv __msg_unpriv("") +__retval(0) +__naked void spin_lock_test1_success(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test2 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__naked void lock_test2_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r1 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test3 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void lock_test3_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 1); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") 
+__description("spin_lock: test4 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void lock_test4_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u16*)(r6 + 3); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test5 call within a locked region") +__failure __msg("calls are not allowed") +__failure_unpriv __msg_unpriv("") +__naked void call_within_a_locked_region(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + call %[bpf_get_prandom_u32]; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test6 missing unlock") +__failure __msg("unlock is missing") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test6_missing_unlock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + if r0 != 0 goto l1_%=; \ + call %[bpf_spin_unlock]; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test7 unlock without lock") +__failure __msg("without taking a lock") +__failure_unpriv __msg_unpriv("") +__naked void lock_test7_unlock_without_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + if r1 != 0 goto l1_%=; \ + call %[bpf_spin_lock]; \ +l1_%=: r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test8 double lock") +__failure __msg("calls are not allowed") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test8_double_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test9 different lock") +__failure __msg("unlock of different lock") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test9_different_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test10 lock in subprog without unlock") +__failure __msg("unlock is missing") +__failure_unpriv __msg_unpriv("") +__naked void lock_in_subprog_without_unlock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call lock_in_subprog_without_unlock__1; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lock_in_subprog_without_unlock__1(void) +{ + asm volatile (" \ + call %[bpf_spin_lock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_spin_lock) + : __clobber_all); +} + +SEC("tc") +__description("spin_lock: test11 ld_abs under lock") +__failure __msg("inside bpf_spin_lock") +__naked void test11_ld_abs_under_lock(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r7 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r0 = *(u8*)skb[0]; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("tc") +__description("spin_lock: regsafe compare reg->id for map value") +__failure __msg("bpf_spin_unlock of different lock") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void reg_id_for_map_value(void) +{ + asm volatile (" \ + r6 = r1; \ + r6 = *(u32*)(r6 + %[__sk_buff_mark]); \ + r1 = %[map_spin_lock] ll; \ + r9 = r1; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r2 = r10; \ + r2 += -4; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r7 = r0; \ + r1 = r9; \ + r2 = r10; \ + r2 += -4; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r8 = r0; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + if r6 == 0 goto l2_%=; \ + goto l3_%=; \ +l2_%=: r7 = r8; \ +l3_%=: r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +/* Make sure 
that regsafe() compares ids for spin lock records using + * check_ids(): + * 1: r9 = map_lookup_elem(...) ; r9.id == 1 + * 2: r8 = map_lookup_elem(...) ; r8.id == 2 + * 3: r7 = ktime_get_ns() + * 4: r6 = ktime_get_ns() + * 5: if r6 > r7 goto <9> + * 6: spin_lock(r8) + * 7: r9 = r8 + * 8: goto <10> + * 9: spin_lock(r9) + * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active, + * ; second visit to (10) should be considered safe + * ; if check_ids() is used. + * 11: exit(0) + */ + +SEC("cgroup/skb") +__description("spin_lock: regsafe() check_ids() similar id mappings") +__success __msg("29: safe") +__failure_unpriv __msg_unpriv("") +__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void check_ids_similar_id_mappings(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + /* r9 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r9 = r0; \ + /* r8 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l1_%=; \ + r8 = r0; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* if r6 > r7 goto +5 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * spin_lock(r8) \ + * r9 = r8 \ + * goto unlock \ + */ \ + if r6 > r7 goto l2_%=; \ + r1 = r8; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r9 = r8; \ + goto l3_%=; \ +l2_%=: /* spin_lock(r9) */ \ + r1 = r9; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ +l3_%=: /* spin_unlock(r9) */ \ + r1 = r9; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ +l0_%=: /* exit(0) */ \ + r0 = 0; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_subreg.c b/tools/testing/selftests/bpf/progs/verifier_subreg.c new file mode 100644 index 000000000000..8613ea160dcd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_subreg.c @@ -0,0 +1,673 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/subreg.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +/* This file contains sub-register zero extension checks for insns defining + * sub-registers, meaning: + * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width + * forms (BPF_END) could define sub-registers. + * - Narrow direct loads, BPF_B/H/W | BPF_LDX. + * - BPF_LD is not exposed to JIT back-ends, so no need for testing. + * + * "get_prandom_u32" is used to initialize low 32-bit of some registers to + * prevent potential optimizations done by verifier or JIT back-ends which could + * optimize register back into constant when range info shows one register is a + * constant. 
+ */ + +SEC("socket") +__description("add32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void add32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000000 ll; \ + w0 += w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("add32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void add32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + /* An insn could have no effect on the low 32-bit, for example:\ + * a = a + 0 \ + * a = a | 0 \ + * a = a & -1 \ + * But, they should still zero high 32-bit. \ + */ \ + w0 += 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 += -2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("sub32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void sub32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x1ffffffff ll; \ + w0 -= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("sub32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void sub32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 -= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 -= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mul32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mul32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000001 ll; \ + w0 *= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mul32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mul32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 *= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 *= -1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("div32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void div32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = -1; \ + w0 /= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("div32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void div32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 /= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 /= 2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("or32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void 
or32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000001 ll; \ + w0 |= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("or32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void or32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 |= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 |= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("and32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void and32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r1 |= r0; \ + r0 = 0x1ffffffff ll; \ + w0 &= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("and32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void and32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 &= -1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 &= -2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("lsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void lsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 <<= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("lsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void lsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 <<= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 <<= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("rsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void rsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 >>= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("rsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void rsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 >>= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 >>= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("neg32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void neg32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = -w0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + 
+SEC("socket") +__description("mod32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mod32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = -1; \ + w0 %%= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mod32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mod32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 %%= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 %%= 2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("xor32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void xor32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000000 ll; \ + w0 ^= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("xor32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void xor32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 ^= 1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mov32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mov32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r1 |= r0; \ + r0 = 0x100000000 ll; \ + w0 = w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mov32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mov32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("arsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void arsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 s>>= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("arsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void arsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 s>>= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 s>>= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end16 (to_le) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void le_reg_zero_extend_check_1(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = le16 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : 
__clobber_all); +} + +SEC("socket") +__description("end32 (to_le) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void le_reg_zero_extend_check_2(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = le32 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end16 (to_be) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void be_reg_zero_extend_check_1(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = be16 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end32 (to_be) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void be_reg_zero_extend_check_2(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = be32 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_b zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_b_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u8*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_h zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_h_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u16*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_w zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_w_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u32*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c new file mode 100644 index 000000000000..7ea535bfbacd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -0,0 +1,726 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#define BPF_SK_LOOKUP(func) \ + /* struct bpf_sock_tuple tuple = {} */ \ + "r2 = 0;" \ + "*(u32*)(r10 - 8) = r2;" \ + "*(u64*)(r10 - 16) = r2;" \ + "*(u64*)(r10 - 24) = r2;" \ + "*(u64*)(r10 - 32) = r2;" \ + "*(u64*)(r10 - 40) = r2;" \ + "*(u64*)(r10 - 48) = r2;" \ + /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ + "r2 = r10;" \ + "r2 += -48;" \ + "r3 = %[sizeof_bpf_sock_tuple];"\ + "r4 = 0;" \ + "r5 = 0;" \ + "call %[" #func "];" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, 
long long); +} map_hash_8b SEC(".maps"); + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: return pointer") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(POINTER_VALUE) +__naked void unpriv_return_pointer(void) +{ + asm volatile (" \ + r0 = r10; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: add const to pointer") +__success __success_unpriv __retval(0) +__naked void unpriv_add_const_to_pointer(void) +{ + asm volatile (" \ + r1 += 8; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: add pointer to pointer") +__failure __msg("R1 pointer += pointer") +__failure_unpriv +__naked void unpriv_add_pointer_to_pointer(void) +{ + asm volatile (" \ + r1 += r10; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: neg pointer") +__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic") +__retval(0) +__naked void unpriv_neg_pointer(void) +{ + asm volatile (" \ + r1 = -r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp pointer with const") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__retval(0) +__naked void unpriv_cmp_pointer_with_const(void) +{ + asm volatile (" \ + if r1 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp pointer with pointer") +__success __failure_unpriv __msg_unpriv("R10 pointer comparison") +__retval(0) +__naked void unpriv_cmp_pointer_with_pointer(void) +{ + asm volatile (" \ + if r1 == r10 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("unpriv: check that printk is disallowed") +__success +__naked void check_that_printk_is_disallowed(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = r10; \ + r1 += -8; \ + r2 = 8; \ + r3 = r1; \ + call %[bpf_trace_printk]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_trace_printk) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: pass pointer to helper function") +__success __failure_unpriv __msg_unpriv("R4 leaks addr") +__retval(0) +__naked void pass_pointer_to_helper_function(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = r2; \ + call %[bpf_map_update_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: indirectly pass 
pointer on stack to helper function") +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8") +__retval(0) +__naked void on_stack_to_helper_function(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: mangle pointer on stack 1") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(0) +__naked void mangle_pointer_on_stack_1(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = 0; \ + *(u32*)(r10 - 8) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: mangle pointer on stack 2") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(0) +__naked void mangle_pointer_on_stack_2(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = 0; \ + *(u8*)(r10 - 1) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: read pointer from stack in small chunks") +__failure __msg("invalid size") +__failure_unpriv +__naked void from_stack_in_small_chunks(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = *(u32*)(r10 - 8); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: write pointer into ctx") +__failure __msg("invalid bpf_context access") +__failure_unpriv __msg_unpriv("R1 leaks addr") +__naked void unpriv_write_pointer_into_ctx(void) +{ + asm volatile (" \ + *(u64*)(r1 + 0) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: spill/fill of ctx") +__success __success_unpriv __retval(0) +__naked void unpriv_spill_fill_of_ctx(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r1 = *(u64*)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 2") +__success __retval(0) +__naked void spill_fill_of_ctx_2(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 3") +__failure __msg("R1 type=fp expected=ctx") +__naked void spill_fill_of_ctx_3(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + *(u64*)(r6 + 0) = r10; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 4") +__failure __msg("R1 type=scalar expected=ctx") +__naked void spill_fill_of_ctx_4(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r0 = 1; \ + lock *(u64 *)(r10 - 8) += r0; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_stx(void) +{ + asm volatile (" \ + r3 = 42; \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += -16; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = 
*(u64*)(r6 + 0); \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +/* Same as above, but use BPF_ST_MEM to save 42 + * instead of BPF_STX_MEM. + */ +SEC("tc") +__description("unpriv: spill/fill of different pointers st") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_st(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += -16; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + .8byte %[st_mem]; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_insn(st_mem, + BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - ctx and sock") +__failure __msg("type=ctx expected=sock") +__naked void pointers_stx_ctx_and_sock(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb == NULL) *target = sock; */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: /* else *target = skb; */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: /* struct __sk_buff *skb = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* skb->mark = 42; */ \ + r3 = 42; \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + /* if (sk) bpf_sk_release(sk) */ \ + if r1 == 0 goto l2_%=; \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - leak sock") +__failure +//.errstr = "same insn cannot be used with different pointers", +__msg("Unreleased reference") +__naked void different_pointers_stx_leak_sock(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb == NULL) *target = sock; */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: /* else *target = skb; */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: /* struct __sk_buff *skb = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* skb->mark = 42; */ \ + r3 = 42; \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)") +__failure __msg("same insn cannot be used with different pointers") +__naked void stx_sock_and_ctx_read(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb) *target = skb */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: 
/* else *target = sock */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r2; \ +l1_%=: /* struct bpf_sock *sk = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\ + if r1 == 0 goto l2_%=; \ + r3 = *(u32*)(r1 + %[bpf_sock_mark]); \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)") +__failure +//.errstr = "same insn cannot be used with different pointers", +__msg("cannot write into sock") +__naked void stx_sock_and_ctx_write(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb) *target = skb */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: /* else *target = sock */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r2; \ +l1_%=: /* struct bpf_sock *sk = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* if (sk) sk->mark = 42; bpf_sk_release(sk); */\ + if r1 == 0 goto l2_%=; \ + r3 = 42; \ + *(u32*)(r1 + %[bpf_sock_mark]) = r3; \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: write pointer into map elem value") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void pointer_into_map_elem_value(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + *(u64*)(r0 + 0) = r0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("alu32: mov u32 const") +__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'") +__retval(0) +__naked void alu32_mov_u32_const(void) +{ + asm volatile (" \ + w7 = 0; \ + w7 &= 1; \ + w0 = w7; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: partial copy of pointer") +__success __failure_unpriv __msg_unpriv("R10 partial copy") +__retval(0) +__naked void unpriv_partial_copy_of_pointer(void) +{ + asm volatile (" \ + w1 = w10; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: pass pointer to tail_call") +__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper") +__retval(0) +__naked void pass_pointer_to_tail_call(void) +{ + asm volatile (" \ + r3 = r1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp map pointer with zero") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__retval(0) +__naked void cmp_map_pointer_with_zero(void) +{ + asm volatile (" \ + r1 = 0; \ + r1 = %[map_hash_8b] ll; \ + if r1 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : 
__clobber_all); +} + +SEC("socket") +__description("unpriv: write into frame pointer") +__failure __msg("frame pointer is read only") +__failure_unpriv +__naked void unpriv_write_into_frame_pointer(void) +{ + asm volatile (" \ + r10 = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: spill/fill frame pointer") +__failure __msg("frame pointer is read only") +__failure_unpriv +__naked void unpriv_spill_fill_frame_pointer(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r10; \ + r10 = *(u64*)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp of frame pointer") +__success __failure_unpriv __msg_unpriv("R10 pointer comparison") +__retval(0) +__naked void unpriv_cmp_of_frame_pointer(void) +{ + asm volatile (" \ + if r10 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: adding of fp, reg") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(0) +__naked void unpriv_adding_of_fp_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = 0; \ + r1 += r10; \ + *(u64*)(r1 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: adding of fp, imm") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(0) +__naked void unpriv_adding_of_fp_imm(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = r10; \ + r1 += 0; \ + *(u64*)(r1 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp of stack pointer") +__success __failure_unpriv __msg_unpriv("R2 pointer comparison") +__retval(0) +__naked void unpriv_cmp_of_stack_pointer(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + if r2 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c new file mode 100644 index 000000000000..4d77407a0a79 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("perf_event") +__description("unpriv: spill/fill of different pointers ldx") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_ldx(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += %[__imm_0]; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + r1 = *(u64*)(r1 + %[sample_period]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, + -(__s32) offsetof(struct bpf_perf_event_data, sample_period) - 8), + __imm_const(sample_period, + offsetof(struct bpf_perf_event_data, sample_period)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c new file mode 100644 index 000000000000..71814a753216 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from 
tools/testing/selftests/bpf/verifier/value_illegal_alu.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value illegal alu op, 1") +__failure __msg("R0 bitwise operator &= on pointer") +__failure_unpriv +__naked void value_illegal_alu_op_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 &= 8; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 2") +__failure __msg("R0 32-bit pointer arithmetic prohibited") +__failure_unpriv +__naked void value_illegal_alu_op_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + w0 += 0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 3") +__failure __msg("R0 pointer arithmetic with /= operator") +__failure_unpriv +__naked void value_illegal_alu_op_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 /= 42; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 4") +__failure __msg("invalid mem access 'scalar'") +__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void value_illegal_alu_op_4(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = be64 r0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 5") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("leaking pointer from stack off -8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void value_illegal_alu_op_5(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = 4096; \ + r2 = r10; \ + r2 += -8; \ + *(u64*)(r2 + 0) = r0; \ + lock *(u64 *)(r2 + 0) += r3; \ + r0 = *(u64*)(r2 + 0); \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c new file mode 100644 index 000000000000..5ba6e53571c8 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c @@ -0,0 +1,1423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_ptr_arith.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <errno.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs const") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void value_ptr_unknown_vs_const(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 3; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs unknown") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void value_ptr_const_vs_unknown(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 3; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs const (ne)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_const_vs_const_ne(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 3; \ + goto l4_%=; \ +l3_%=: r1 = 5; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + 
exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs const (eq)") +__success __success_unpriv __retval(1) +__naked void ptr_const_vs_const_eq(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 5; \ + goto l4_%=; \ +l3_%=: r1 = 5; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (eq)") +__success __success_unpriv __retval(1) +__naked void ptr_unknown_vs_unknown_eq(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (lt)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_unknown_vs_unknown_lt(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x3; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (gt)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_unknown_vs_unknown_gt(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 
= -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x3; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr from different maps") +__success __success_unpriv __retval(1) +__naked void value_ptr_from_different_maps(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar from different maps") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(1) +__naked void known_scalar_from_different_maps(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r0 -= r1; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr from different maps, but same value properties") +__success __success_unpriv __retval(1) +__naked void maps_but_same_value_properties(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_48b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_48b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: mixing value pointer and scalar, 1") +__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited") +__retval(0) +__naked void value_pointer_and_scalar_1(void) +{ + asm volatile (" \ + /* load map value pointer into r0 and r2 */ \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* load some number from the map into r1 */ \ + r1 = *(u8*)(r0 + 0); \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l1_%=; \ + /* branch A */ \ + r2 = r0; \ + r3 = 0; \ + goto l2_%=; \ +l1_%=: /* branch B */ \ + r2 = 0; \ + r3 = 0x100000; \ +l2_%=: /* common instruction */ \ + r2 += r3; \ + /* depending on r1, branch: */ \ + if r1 != 0 goto 
l3_%=; \ + /* branch A */ \ + goto l4_%=; \ +l3_%=: /* branch B */ \ + r0 = 0x13371337; \ + /* verifier follows fall-through */ \ + if r2 != 0x100000 goto l4_%=; \ + r0 = 0; \ + exit; \ +l4_%=: /* fake-dead code; targeted from branch A to \ + * prevent dead code sanitization \ + */ \ + r0 = *(u8*)(r0 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: mixing value pointer and scalar, 2") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void value_pointer_and_scalar_2(void) +{ + asm volatile (" \ + /* load map value pointer into r0 and r2 */ \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* load some number from the map into r1 */ \ + r1 = *(u8*)(r0 + 0); \ + /* depending on r1, branch: */ \ + if r1 == 0 goto l1_%=; \ + /* branch A */ \ + r2 = 0; \ + r3 = 0x100000; \ + goto l2_%=; \ +l1_%=: /* branch B */ \ + r2 = r0; \ + r3 = 0; \ +l2_%=: /* common instruction */ \ + r2 += r3; \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l3_%=; \ + /* branch A */ \ + goto l4_%=; \ +l3_%=: /* branch B */ \ + r0 = 0x13371337; \ + /* verifier follows fall-through */ \ + if r2 != 0x100000 goto l4_%=; \ + r0 = 0; \ + exit; \ +l4_%=: /* fake-dead code; targeted from branch A to \ + * prevent dead code sanitization, rejected \ + * via branch B however \ + */ \ + r0 = *(u8*)(r0 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 1") +__success __success_unpriv __retval(0x100000) +__naked void alu_with_different_scalars_1(void) +{ + asm volatile (" \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u32*)(r0 + 0); \ + if r1 == 0 goto l1_%=; \ + r2 = 0; \ + r3 = 0x100000; \ + goto l2_%=; \ +l1_%=: r2 = 42; \ + r3 = 0x100001; \ +l2_%=: r2 += r3; \ + r0 = r2; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 2") +__success __success_unpriv __retval(0) +__naked void alu_with_different_scalars_2(void) +{ + asm volatile (" \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r6 = r1; \ + r2 = r10; \ + r2 += -16; \ + r7 = 0; \ + *(u64*)(r10 - 16) = r7; \ + call %[bpf_map_delete_elem]; \ + r7 = r0; \ + r1 = r6; \ + r2 = r10; \ + r2 += -16; \ + call %[bpf_map_delete_elem]; \ + r6 = r0; \ + r8 = r6; \ + r8 += r7; \ + r0 = r8; \ + r0 += %[einval]; \ + r0 += %[einval]; \ + exit; \ +" : + : __imm(bpf_map_delete_elem), + __imm_addr(map_array_48b), + __imm_const(einval, EINVAL) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 3") +__success __success_unpriv __retval(0) +__naked void alu_with_different_scalars_3(void) +{ + asm volatile (" \ + r0 = %[einval]; \ + r0 *= -1; \ + r7 = r0; \ + r0 = %[einval]; \ + r0 *= -1; \ + r6 = r0; \ + r8 = r6; \ + r8 += r7; \ + r0 = r8; \ + r0 += %[einval]; \ + r0 += %[einval]; \ + exit; \ +" : + : __imm_const(einval, EINVAL) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 1") 
+__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void upper_oob_arith_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 48; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void upper_oob_arith_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 49; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 3") +__success __success_unpriv __retval(1) +__naked void upper_oob_arith_test_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 1") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void lower_oob_arith_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 48; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void lower_oob_arith_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 48; \ + r0 -= r1; \ + r1 = 1; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 3") +__success __success_unpriv __retval(1) +__naked void lower_oob_arith_test_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 47; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); 
+} + +SEC("socket") +__description("map access: known scalar += value_ptr") +__success __success_unpriv __retval(1) +__naked void access_known_scalar_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 1") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 2") +__failure __msg("invalid access to map value") +__failure_unpriv +__naked void value_ptr_known_scalar_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 49; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 3") +__failure __msg("invalid access to map value") +__failure_unpriv +__naked void value_ptr_known_scalar_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = -1; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 4") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 5; \ + r0 += r1; \ + r1 = -2; \ + r0 += r1; \ + r1 = -1; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 5") +__success __success_unpriv __retval(0xabcdef12) +__naked void value_ptr_known_scalar_5(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_const(__imm_0, (6 + 1) * sizeof(int)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 6") +__success __success_unpriv __retval(0xabcdef12) +__naked void value_ptr_known_scalar_6(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 
goto l0_%=; \ + r1 = %[__imm_0]; \ + r0 += r1; \ + r1 = %[__imm_1]; \ + r0 += r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_const(__imm_0, (3 + 1) * sizeof(int)), + __imm_const(__imm_1, 3 * sizeof(int)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += N, value_ptr -= N known scalar") +__success __success_unpriv __retval(0x12345678) +__naked void value_ptr_n_known_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + w1 = 0x12345678; \ + *(u32*)(r0 + 0) = r1; \ + r0 += 2; \ + r1 = 2; \ + r0 -= r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 1") +__success __success_unpriv __retval(1) +__naked void unknown_scalar_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 2") +__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 3") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = -1; \ + r0 += r1; \ + r1 = 1; \ + r0 += r1; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 4") +__failure __msg("R1 max value is outside of the allowed memory range") +__msg_unpriv("R1 pointer arithmetic of map value goes out of range") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 19; \ + r0 += r1; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 1") +__success __success_unpriv __retval(1) +__naked void value_ptr_unknown_scalar_1(void) +{ + asm volatile (" \ 
+ r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 2") +__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void value_ptr_unknown_scalar_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r0 += r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 3") +__success __success_unpriv __retval(1) +__naked void value_ptr_unknown_scalar_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + r2 = *(u64*)(r0 + 8); \ + r3 = *(u64*)(r0 + 16); \ + r1 &= 0xf; \ + r3 &= 1; \ + r3 |= 1; \ + if r2 > r3 goto l0_%=; \ + r0 += r3; \ + r0 = *(u8*)(r0 + 0); \ + r0 = 1; \ +l1_%=: exit; \ +l0_%=: r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += value_ptr") +__failure __msg("R0 pointer += pointer prohibited") +__failure_unpriv +__naked void access_value_ptr_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += r0; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar -= value_ptr") +__failure __msg("R1 tried to subtract pointer from scalar") +__failure_unpriv +__naked void access_known_scalar_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 -= r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__naked void access_value_ptr_known_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, 2") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + 
if r0 == 0 goto l0_%=; \ + r1 = 6; \ + r2 = 4; \ + r0 += r1; \ + r0 -= r2; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar -= value_ptr") +__failure __msg("R1 tried to subtract pointer from scalar") +__failure_unpriv +__naked void access_unknown_scalar_value_ptr(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 -= r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= unknown scalar") +__failure __msg("R0 min value is negative") +__failure_unpriv +__naked void access_value_ptr_unknown_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= unknown scalar, 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void value_ptr_unknown_scalar_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 |= 0x7; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0x7; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= value_ptr") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("R0 pointer -= pointer prohibited") +__naked void access_value_ptr_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 -= r0; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: trying to leak tainted dst reg") +__failure __msg("math between map_value pointer and 4294967295 is not allowed") +__failure_unpriv +__naked void to_leak_tainted_dst_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r2 = r0; \ + w1 = 0xFFFFFFFF; \ + w1 = w1; \ + r2 -= r1; \ + *(u64*)(r0 + 0) = r2; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("tc") +__description("32bit pkt_ptr -= scalar") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void _32bit_pkt_ptr_scalar(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + r6 = r7; \ + r6 += 40; \ + if r6 > r8 goto l0_%=; \ + w4 = w7; \ + 
w6 -= w4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("32bit scalar -= pkt_ptr") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void _32bit_scalar_pkt_ptr(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + r6 = r7; \ + r6 += 40; \ + if r6 > r8 goto l0_%=; \ + w4 = w6; \ + w4 -= w7; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 47e9e076bc8f..b4edd8454934 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -25,6 +25,8 @@ #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" #define TEST_TAG_RETVAL_PFX "comment:test_retval=" #define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv=" +#define TEST_TAG_AUXILIARY "comment:test_auxiliary" +#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv" /* Warning: duplicated in bpf_misc.h */ #define POINTER_VALUE 0xcafe4all @@ -59,6 +61,8 @@ struct test_spec { int log_level; int prog_flags; int mode_mask; + bool auxiliary; + bool valid; }; static int tester_init(struct test_loader *tester) @@ -87,6 +91,11 @@ static void free_test_spec(struct test_spec *spec) free(spec->unpriv.name); free(spec->priv.expect_msgs); free(spec->unpriv.expect_msgs); + + spec->priv.name = NULL; + spec->unpriv.name = NULL; + spec->priv.expect_msgs = NULL; + spec->unpriv.expect_msgs = NULL; } static int push_msg(const char *msg, struct test_subspec *subspec) @@ -204,6 +213,12 @@ static int parse_test_spec(struct test_loader *tester, spec->unpriv.expect_failure = false; spec->mode_mask |= UNPRIV; has_unpriv_result = true; + } else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) { + spec->auxiliary = true; + spec->mode_mask |= PRIV; + } else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) { + spec->auxiliary = true; + spec->mode_mask |= UNPRIV; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; err = push_msg(msg, &spec->priv); @@ -314,6 +329,8 @@ static int parse_test_spec(struct test_loader *tester, } } + spec->valid = true; + return 0; cleanup: @@ -516,16 +533,18 @@ void run_subtest(struct test_loader *tester, struct bpf_object_open_opts *open_opts, const void *obj_bytes, size_t obj_byte_cnt, + struct test_spec *specs, struct test_spec *spec, bool unpriv) { struct test_subspec *subspec = unpriv ? 
&spec->unpriv : &spec->priv; + struct bpf_program *tprog, *tprog_iter; + struct test_spec *spec_iter; struct cap_state caps = {}; - struct bpf_program *tprog; struct bpf_object *tobj; struct bpf_map *map; - int retval; - int err; + int retval, err, i; + bool should_load; if (!test__start_subtest(subspec->name)) return; @@ -546,15 +565,23 @@ void run_subtest(struct test_loader *tester, if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */ goto subtest_cleanup; - bpf_object__for_each_program(tprog, tobj) - bpf_program__set_autoload(tprog, false); + i = 0; + bpf_object__for_each_program(tprog_iter, tobj) { + spec_iter = &specs[i++]; + should_load = false; - bpf_object__for_each_program(tprog, tobj) { - /* only load specified program */ - if (strcmp(bpf_program__name(tprog), spec->prog_name) == 0) { - bpf_program__set_autoload(tprog, true); - break; + if (spec_iter->valid) { + if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) { + tprog = tprog_iter; + should_load = true; + } + + if (spec_iter->auxiliary && + spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV)) + should_load = true; } + + bpf_program__set_autoload(tprog_iter, should_load); } prepare_case(tester, spec, tobj, tprog); @@ -587,9 +614,17 @@ void run_subtest(struct test_loader *tester, /* For some reason test_verifier executes programs * with all capabilities restored. Do the same here. */ - if (!restore_capabilities(&caps)) + if (restore_capabilities(&caps)) goto tobj_cleanup; + if (tester->pre_execution_cb) { + err = tester->pre_execution_cb(tobj); + if (err) { + PRINT_FAIL("pre_execution_cb failed: %d\n", err); + goto tobj_cleanup; + } + } + do_prog_test_run(bpf_program__fd(tprog), &retval); if (retval != subspec->retval && subspec->retval != POINTER_VALUE) { PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval); @@ -609,11 +644,12 @@ static void process_subtest(struct test_loader *tester, skel_elf_bytes_fn elf_bytes_factory) { LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name); + struct test_spec *specs = NULL; struct bpf_object *obj = NULL; struct bpf_program *prog; const void *obj_bytes; + int err, i, nr_progs; size_t obj_byte_cnt; - int err; if (tester_init(tester) < 0) return; /* failed to initialize tester */ @@ -623,25 +659,42 @@ static void process_subtest(struct test_loader *tester, if (!ASSERT_OK_PTR(obj, "obj_open_mem")) return; - bpf_object__for_each_program(prog, obj) { - struct test_spec spec; + nr_progs = 0; + bpf_object__for_each_program(prog, obj) + ++nr_progs; - /* if we can't derive test specification, go to the next test */ - err = parse_test_spec(tester, obj, prog, &spec); - if (err) { + specs = calloc(nr_progs, sizeof(struct test_spec)); + if (!ASSERT_OK_PTR(specs, "Can't alloc specs array")) + return; + + i = 0; + bpf_object__for_each_program(prog, obj) { + /* ignore tests for which we can't derive test specification */ + err = parse_test_spec(tester, obj, prog, &specs[i++]); + if (err) PRINT_FAIL("Can't parse test spec for program '%s'\n", bpf_program__name(prog)); + } + + i = 0; + bpf_object__for_each_program(prog, obj) { + struct test_spec *spec = &specs[i++]; + + if (!spec->valid || spec->auxiliary) continue; - } - if (spec.mode_mask & PRIV) - run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, false); - if (spec.mode_mask & UNPRIV) - run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, true); + if (spec->mode_mask & PRIV) + run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, + specs, spec, false); + if (spec->mode_mask 
& UNPRIV) + run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, + specs, spec, true); - free_test_spec(&spec); } + for (i = 0; i < nr_progs; ++i) + free_test_spec(&specs[i]); + free(specs); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 10ba43250668..0ed3134333d4 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -424,14 +424,23 @@ int get_bpf_max_tramp_links(void); #define BPF_TESTMOD_TEST_FILE "/sys/kernel/bpf_testmod" +typedef int (*pre_execution_cb)(struct bpf_object *obj); + struct test_loader { char *log_buf; size_t log_buf_sz; size_t next_match_pos; + pre_execution_cb pre_execution_cb; struct bpf_object *obj; }; +static inline void test_loader__set_pre_execution_cb(struct test_loader *tester, + pre_execution_cb cb) +{ + tester->pre_execution_cb = cb; +} + typedef const void *(*skel_elf_bytes_fn)(size_t *sz); extern void test_loader__run_subtests(struct test_loader *tester, diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c deleted file mode 100644 index 43942ce8cf15..000000000000 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ /dev/null @@ -1,884 +0,0 @@ -{ - "subtraction bounds (map value) variant 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the allowed memory range", - .result = REJECT, -}, -{ - "subtraction bounds (map value) variant 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, -}, -{ - "check subtraction on pointers for unpriv", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP), - BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0), - 
BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1, 9 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R9 pointer -= pointer prohibited", -}, -{ - "bounds check based on zero-extended MOV", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0x0000'0000'ffff'ffff */ - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0 */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT -}, -{ - "bounds check based on sign-extended MOV. test1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0xffff'ffff'ffff'ffff */ - BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0xffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), - /* r0 = <oob pointer> */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access to OOB pointer */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 4294967295", - .result = REJECT -}, -{ - "bounds check based on sign-extended MOV. test2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0xffff'ffff'ffff'ffff */ - BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0xfff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), - /* r0 = <oob pointer> */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access to OOB pointer */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is outside of the allowed memory range", - .result = REJECT -}, -{ - "bounds check based on reg_off + var_off + insn_off. 
test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "value_size=8 off=1073741825", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "bounds check based on reg_off + var_off + insn_off. test2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "value 1073741823", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "bounds check after truncation of non-boundary-crossing range", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), - /* r2 = 0x10'0000'0000 */ - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), - /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - /* r1 = [0x00, 0xff] */ - BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), - /* r1 = 0 */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT -}, -{ - "bounds check after truncation of boundary-crossing range (1)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0x1'0000'007f] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0xffff'ffff] or - * [0x0000'0000, 0x0000'007f] - */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0x00, 0xff] or - * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 
- */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* error on OOB pointer computation */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - /* not actually fully unbounded, but the bound is very high */ - .errstr = "value -4294967168 makes map_value pointer be out of bounds", - .result = REJECT, -}, -{ - "bounds check after truncation of boundary-crossing range (2)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0x1'0000'007f] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0xffff'ffff] or - * [0x0000'0000, 0x0000'007f] - * difference to previous test: truncation via MOV32 - * instead of ALU32. - */ - BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0x00, 0xff] or - * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* error on OOB pointer computation */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "value -4294967168 makes map_value pointer be out of bounds", - .result = REJECT, -}, -{ - "bounds check after wrapping 32-bit addition", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - /* r1 = 0x7fff'ffff */ - BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), - /* r1 = 0xffff'fffe */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - /* r1 = 0 */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT -}, -{ - "bounds check after shift with oversized count operand", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_2, 32), - BPF_MOV64_IMM(BPF_REG_1, 1), - /* r1 = (u32)1 << (u32)32 = ? 
*/ - BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), - /* r1 = [0x0000, 0xffff] */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), - /* computes unknown pointer, potentially OOB */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the allowed memory range", - .result = REJECT -}, -{ - "bounds check after right shift of maybe-negative number", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - /* r1 = [-0x01, 0xfe] */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - /* r1 = 0 or 0xff'ffff'ffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* r1 = 0 or 0xffff'ffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* computes unknown pointer, potentially OOB */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 unbounded memory access", - .result = REJECT -}, -{ - "bounds check after 32-bit right shift with 64-bit input", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* r1 = 2 */ - BPF_MOV64_IMM(BPF_REG_1, 2), - /* r1 = 1<<32 */ - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31), - /* r1 = 0 (NOT 2!) */ - BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31), - /* r1 = 0xffff'fffe (NOT 0!) */ - BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2), - /* error on computing OOB pointer */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "math between map_value pointer and 4294967294 is not allowed", - .result = REJECT, -}, -{ - "bounds check map access with off+size signed 32bit overflow. test1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 2147483646", - .result = REJECT -}, -{ - "bounds check map access with off+size signed 32bit overflow. 
test2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "pointer offset 1073741822", - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .result = REJECT -}, -{ - "bounds check map access with off+size signed 32bit overflow. test3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "pointer offset -1073741822", - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .result = REJECT -}, -{ - "bounds check map access with off+size signed 32bit overflow. test4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 1000000), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 1000000000000", - .result = REJECT -}, -{ - "bounds check mixed 32bit and 64bit arithmetic. test1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - /* r1 = 0xffffFFFF00000001 */ - BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3), - /* check ALU64 op keeps 32bit bounds */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1), - BPF_JMP_A(1), - /* invalid ldx if bounds are lost above */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, - .result = ACCEPT -}, -{ - "bounds check mixed 32bit and 64bit arithmetic. 
test2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - /* r1 = 0xffffFFFF00000001 */ - BPF_MOV64_IMM(BPF_REG_2, 3), - /* r1 = 0x2 */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), - /* check ALU32 op zero extends 64bit bounds */ - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1), - BPF_JMP_A(1), - /* invalid ldx if bounds are lost above */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, - .result = ACCEPT -}, -{ - "assigning 32bit bounds to 64bit for wA = 0, wB = wA", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV32_IMM(BPF_REG_9, 0), - BPF_MOV32_REG(BPF_REG_2, BPF_REG_9), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "bounds check for reg = 0, reg xor 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg32 = 0, reg32 xor 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 1), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg = 2, reg xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg = any, reg xor 3", - .insns = { - 
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", - .errstr_unpriv = "invalid access to map value", -}, -{ - "bounds check for reg32 = any, reg32 xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", - .errstr_unpriv = "invalid access to map value", -}, -{ - "bounds check for reg > 0, reg xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 0, 3), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg32 > 0, reg32 xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP32_IMM(BPF_JLE, BPF_REG_1, 0, 3), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP32_IMM(BPF_JGE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds checks after 32-bit truncation. 
test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - /* This used to reduce the max bound to 0x7fffffff */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "bounds checks after 32-bit truncation. test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1), - BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "bound check with JMP_JLT for crossing 64-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */ - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check with JMP_JSLT for crossing 64-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 13), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_LD_IMM64(BPF_REG_2, 0x8000000000000fff), - BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_2, 3), - /* r1 signed range is [S64_MIN, S64_MAX] */ - BPF_JMP_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -3), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check for loop upper bound greater than U32_MAX", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000), - BPF_ALU64_REG(BPF_ADD, 
BPF_REG_1, BPF_REG_0), - - BPF_LD_IMM64(BPF_REG_0, 0x100000000), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check with JMP32_JLT for crossing 32-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 6), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), - BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_MOV32_IMM(BPF_REG_0, 0x80000000), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), - /* r1 unsigned range is [0, 0x8000000f] */ - BPF_JMP32_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check with JMP32_JSLT for crossing 32-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 10), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), - BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_MOV32_IMM(BPF_REG_2, 0x80000fff), - BPF_MOV32_IMM(BPF_REG_0, 0x80000000), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP32_REG(BPF_JSGT, BPF_REG_0, BPF_REG_2, 3), - /* r1 signed range is [S32_MIN, S32_MAX] */ - BPF_JMP32_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -3), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c deleted file mode 100644 index 3e024c891178..000000000000 --- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c +++ /dev/null @@ -1,87 +0,0 @@ -{ - "bpf_get_stack return R0 within range", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2), - BPF_MOV64_IMM(BPF_REG_4, 256), - BPF_EMIT_CALL(BPF_FUNC_get_stack), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), - BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16), - BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_5, 
sizeof(struct test_val)/2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_9), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_get_stack), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bpf_get_task_stack return R0 range is refined", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task - BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write - BPF_MOV64_IMM(BPF_REG_3, 48), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_get_task_stack), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_seq_write), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_ITER, - .kfunc = "task", - .runs = -1, // Don't run, just load - .fixup_map_array_48b = { 3 }, -}, diff --git a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c b/tools/testing/selftests/bpf/verifier/btf_ctx_access.c deleted file mode 100644 index 0484d3de040d..000000000000 --- a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c +++ /dev/null @@ -1,25 +0,0 @@ -{ - "btf_ctx_access accept", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 8), /* load 2nd argument value (int pointer) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "bpf_modify_return_test", -}, - -{ - "btf_ctx_access u32 pointer accept", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), /* load 1nd argument value (u32 pointer) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "bpf_fentry_test9", -}, diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c deleted file mode 100644 index 2fd31612c0b8..000000000000 --- a/tools/testing/selftests/bpf/verifier/ctx.c +++ /dev/null @@ -1,186 +0,0 @@ -{ - "context stores via BPF_ATOMIC", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "arithmetic ops make PTR_TO_CTX unusable", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct __sk_buff, data) - - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct 
__sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr = "dereference of modified ctx ptr", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "pass unmodified ctx pointer to helper", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "pass modified ctx pointer to helper, 1", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "negative offset ctx ptr R1 off=-612 disallowed", -}, -{ - "pass modified ctx pointer to helper, 2", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .result = REJECT, - .errstr_unpriv = "negative offset ctx ptr R1 off=-612 disallowed", - .errstr = "negative offset ctx ptr R1 off=-612 disallowed", -}, -{ - "pass modified ctx pointer to helper, 3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "variable ctx access var_off=(0x0; 0x4)", -}, -{ - "pass ctx or null check, 1: ctx", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 2: null", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 3: 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = REJECT, - .errstr = "R1 type=scalar expected=ctx", -}, -{ - "pass ctx or null check, 4: ctx - const", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = REJECT, - .errstr = "negative offset ctx ptr R1 off=-612 disallowed", -}, -{ - "pass ctx or null check, 5: null (connect)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_INET4_CONNECT, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 6: null (bind)", - .insns = { - 
BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 7: ctx (bind)", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 8: null (bind)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = REJECT, - .errstr = "R1 type=scalar expected=ctx", -}, diff --git a/tools/testing/selftests/bpf/verifier/d_path.c b/tools/testing/selftests/bpf/verifier/d_path.c deleted file mode 100644 index b988396379a7..000000000000 --- a/tools/testing/selftests/bpf/verifier/d_path.c +++ /dev/null @@ -1,37 +0,0 @@ -{ - "d_path accept", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), - BPF_LD_IMM64(BPF_REG_3, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "dentry_open", -}, -{ - "d_path reject", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), - BPF_LD_IMM64(BPF_REG_3, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "helper call is not allowed in probe", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "d_path", -}, diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c deleted file mode 100644 index dce2e28aeb43..000000000000 --- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c +++ /dev/null @@ -1,710 +0,0 @@ -{ - "pkt_end - pkt_start is allowed", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = TEST_DATA_LEN, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test2", - .insns = { - 
BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access off=76", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, -}, -{ - "direct packet access: test4 (write)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test5 (pkt_end >= reg, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test6 (pkt_end >= reg, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test7 (pkt_end >= reg, both accesses)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, 
BPF_REG_3, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test8 (double test, variant 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test9 (double test, variant 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test10 (write invalid)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test11 (shift, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_3, 144), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test12 (and, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_3, 144), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test13 (branches, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), - BPF_MOV64_IMM(BPF_REG_3, 14), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 24), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), - BPF_MOV64_IMM(BPF_REG_5, 12), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test15 (spill with xadd)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_5, 4096), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), - BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R2 invalid mem access 'scalar'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test16 (arith on data_end)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on 
pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test17 (pruning, alignment)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), - BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_A(-6), - }, - .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, -}, -{ - "direct packet access: test18 (imm += pkt_ptr, 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 8), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test19 (imm += pkt_ptr, 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - BPF_MOV64_IMM(BPF_REG_4, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test20 (x += pkt_ptr, 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test21 (x += pkt_ptr, 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 
0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test22 (x += pkt_ptr, 3)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test23 (x += pkt_ptr, 4)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test24 (x += pkt_ptr, 5)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 64), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test25 (marking on <, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 
- offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test26 (marking on <, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test27 (marking on <=, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test28 (marking on <=, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test29 (reg > pkt_end in subprog)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test30 (check_id() in regsafe(), bad access)", - .insns = { - /* r9 = ctx */ - BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), - /* r7 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* r2 = ctx->data - * r3 = ctx->data - * r4 = ctx->data_end - */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_9, offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_9, offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_9, 
offsetof(struct __sk_buff, data_end)), - /* if r6 > 100 goto exit - * if r7 > 100 goto exit - */ - BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 100, 9), - BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 100, 8), - /* r2 += r6 ; this forces assignment of ID to r2 - * r2 += 1 ; get some fixed off for r2 - * r3 += r7 ; this forces assignment of ID to r3 - * r3 += 1 ; get some fixed off for r3 - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1), - /* if r6 > r7 goto +1 ; no new information about the state is derived from - * ; this check, thus produced verifier states differ - * ; only in 'insn_idx' - * r2 = r3 ; optionally share ID between r2 and r3 - */ - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), - /* if r3 > ctx->data_end goto exit */ - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 1), - /* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2, - * ; this is not always safe - */ - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, -1), - /* exit(0) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .flags = BPF_F_TEST_STATE_FREQ, - .result = REJECT, - .errstr = "invalid access to packet, off=0 size=1, R2", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, diff --git a/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c b/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c deleted file mode 100644 index 67a1c07ead34..000000000000 --- a/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c +++ /dev/null @@ -1,174 +0,0 @@ -{ - /* This is equivalent to the following program: - * - * r6 = skb->sk; - * r7 = sk_fullsock(r6); - * r0 = sk_fullsock(r6); - * if (r0 == 0) return 0; (a) - * if (r0 != r7) return 0; (b) - * *r7->type; (c) - * return 0; - * - * It is safe to dereference r7 at point (c), because of (a) and (b). - * The test verifies that relation r0 == r7 is propagated from (b) to (c). - */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch", - .insns = { - /* r6 = skb->sk; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == 0) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* if (r0 == r7) r0 = *(r7->type); */ - BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Same as above, but verify that another branch of JNE still - * prohibits access to PTR_MAYBE_NULL. 
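Illustrative aside, not part of the patch: every direct_packet_access.c case deleted above revolves around one idiom, proving to the verifier that a packet pointer stays below data_end before it is dereferenced. The accepted shape of e.g. test1/test4, written as the restricted C the macros correspond to (section and function names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pkt_access(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	unsigned char *p = data;

	/* without this bounds check the accesses below are rejected
	 * with "invalid access to packet" */
	if ((void *)(p + 8) > data_end)
		return 0;

	p[0] = p[1];	/* reads and writes are both allowed for SCHED_CLS */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";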
- */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch", - .insns = { - /* r6 = skb->sk */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == 0) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - /* if (r0 == r7) return 0; */ - BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */ - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - /* r0 = *(r7->type); */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "R7 invalid mem access 'sock_or_null'", - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Same as a first test, but not null should be inferred for JEQ branch */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch", - .insns = { - /* r6 = skb->sk; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - /* if (r0 != r7) return 0; */ - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */ - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - /* r0 = *(r7->type); */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0; */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Same as above, but verify that another branch of JNE still - * prohibits access to PTR_MAYBE_NULL. - */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch", - .insns = { - /* r6 = skb->sk; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* if (r0 != r7) r0 = *(r7->type); */ - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0; */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "R7 invalid mem access 'sock_or_null'", - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Maps are treated in a different branch of `mark_ptr_not_null_reg`, - * so separate test for maps case. 
- */ - "jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE", - .insns = { - /* r9 = &some stack to use as key */ - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_9, -8), - /* r8 = process local map */ - BPF_LD_MAP_FD(BPF_REG_8, 0), - /* r6 = map_lookup_elem(r8, r9); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* r7 = map_lookup_elem(r8, r9); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* if (r6 == 0) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), - /* if (r6 != r7) return 0; */ - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1), - /* read *r7; */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_xdp_sock, queue_id)), - /* return 0; */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_xskmap = { 3 }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, -}, diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c deleted file mode 100644 index 1af37187dc12..000000000000 --- a/tools/testing/selftests/bpf/verifier/loops1.c +++ /dev/null @@ -1,206 +0,0 @@ -{ - "bounded loop, count to 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop, count to 20", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded loop, count from positive unknown to 4", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop, count from totally unknown to 4", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded loop, count to 4 with equality", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded loop, start in the middle", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_A(1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "back-edge", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop containing a forward jump", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop that jumps out rather than in", - .insns = { - 
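Illustrative aside, not part of the patch: the jeq_infer_not_null.c comments removed above already spell out the equivalent program in registers. In restricted C the accepted JNE variant is roughly the following; once r0 is proven non-NULL and r0 == r7 is established, r7 may be dereferenced as well:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 sk_type;

SEC("cgroup_skb/ingress")
int infer_not_null(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct bpf_sock *a, *b;

	if (!sk)
		return 1;
	a = bpf_sk_fullsock(sk);
	b = bpf_sk_fullsock(sk);
	if (!b)
		return 1;
	if (b != a)
		return 1;
	/* safe: b is non-NULL and b == a, so a is non-NULL too */
	sk_type = a->type;
	return 1;
}

char LICENSE[] SEC("license") = "GPL";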
BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 10000, 2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_JMP_A(-4), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "infinite loop after a conditional jump", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 5), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_A(-2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "program is too large", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded recursion", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "back-edge", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "infinite loop in two jumps", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "loop detected", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "infinite loop: three-jump trick", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "loop detected", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "not-taken loop with back jump to 1st insn", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 123), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 123, -}, -{ - "taken loop with back jump to 1st insn", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 55, -}, -{ - "taken loop with back jump to 1st insn, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, -3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 55, -}, diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c deleted file mode 100644 index 5c8944d0b091..000000000000 --- a/tools/testing/selftests/bpf/verifier/lwt.c +++ /dev/null @@ -1,189 +0,0 @@ -{ - "invalid direct packet write for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, 
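Illustrative aside, not part of the patch: the loops1.c cases deleted above probe which loop shapes the verifier accepts. The ACCEPT cases are counted loops with provable bounds; the REJECT cases are back-edges whose trip count cannot be bounded. The "taken loop with back jump to 1st insn" case corresponds roughly to this restricted C:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int bounded_loop(struct xdp_md *ctx)
{
	int i, sum = 0;

	/* bounded: the verifier can enumerate all ten iterations */
	for (i = 10; i != 0; i--)
		sum += i;

	return sum;	/* 55, the .retval of the deleted test */
}

char LICENSE[] SEC("license") = "GPL";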
BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "cannot write into packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "invalid direct packet write for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "cannot write into packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_OUT, -}, -{ - "direct packet write for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "direct packet read for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "direct packet read for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_OUT, -}, -{ - "direct packet read for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "overlapping checks for direct packet access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - 
"make headroom for LWT_XMIT", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_skb_change_head), - /* split for s390 to succeed */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 42), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_skb_change_head), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "invalid access of tc_classid for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", -}, -{ - "invalid access of tc_classid for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", -}, -{ - "invalid access of tc_classid for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", -}, -{ - "check skb->tc_classid half load not permitted for lwt prog", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid) + 2), -#endif - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c deleted file mode 100644 index 128a348b762d..000000000000 --- a/tools/testing/selftests/bpf/verifier/map_in_map.c +++ /dev/null @@ -1,96 +0,0 @@ -{ - "map in map access", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .result = ACCEPT, -}, -{ - "map in map state pruning", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, 
BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 4, 14 }, - .flags = BPF_F_TEST_STATE_FREQ, - .result = VERBOSE_ACCEPT, - .errstr = "processed 25 insns", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "invalid inner map pointer", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .errstr = "R1 pointer arithmetic on map_ptr prohibited", - .result = REJECT, -}, -{ - "forgot null checking on the inner map pointer", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .errstr = "R1 type=map_value_or_null expected=map_ptr", - .result = REJECT, -}, diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c index d775ccb01989..a0cfc06d75bc 100644 --- a/tools/testing/selftests/bpf/verifier/map_kptr.c +++ b/tools/testing/selftests/bpf/verifier/map_kptr.c @@ -288,33 +288,6 @@ .result = REJECT, .errstr = "off=0 kptr isn't referenced kptr", }, -{ - "map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_LD_MAP_FD(BPF_REG_6, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_kptr = { 1 }, - .result = REJECT, - .errstr = "arg#0 no referenced kptr at map value offset=0", - .fixup_kfunc_btf_id = { - { "bpf_kfunc_call_test_kptr_get", 13 }, - } -}, /* Tests for referenced PTR_TO_BTF_ID */ { "map_kptr: ref: loaded pointer marked as untrusted", diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c deleted file mode 100644 index 1f2b8c4cb26d..000000000000 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ /dev/null @@ -1,100 +0,0 @@ -{ - "calls: two calls returning different map pointers for lookup (hash, array)", - .insns = { - /* main prog */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_CALL_REL(11), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_CALL_REL(12), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - 
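Illustrative aside, not part of the patch: the map_in_map.c cases deleted above boil down to one rule: the outer lookup returns the inner map pointer or NULL, and that pointer must be NULL-checked (and left unmodified) before it is used for the inner lookup. A restricted-C sketch of the accepted shape, assuming an ARRAY_OF_MAPS outer map; all names are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 1);
	__type(key, __u32);
	__array(values, struct inner);
} outer_map SEC(".maps") = {
	.values = { &inner_map },
};

SEC("xdp")
int map_in_map(struct xdp_md *ctx)
{
	__u32 key = 0;
	void *inner;
	__u32 *val;

	inner = bpf_map_lookup_elem(&outer_map, &key);
	if (!inner)	/* skipping this is the "forgot null checking" REJECT */
		return XDP_PASS;
	val = bpf_map_lookup_elem(inner, &key);
	if (val)
		*val = 1;
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";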
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - /* subprog 1 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* subprog 2 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_48b = { 13 }, - .fixup_map_array_48b = { 16 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "calls: two calls returning different map pointers for lookup (hash, map in map)", - .insns = { - /* main prog */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_CALL_REL(11), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_CALL_REL(12), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - /* subprog 1 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* subprog 2 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_in_map = { 16 }, - .fixup_map_array_48b = { 13 }, - .result = REJECT, - .errstr = "only read from bpf_array is supported", -}, -{ - "cond: two branches returning different map pointers for lookup (tail, tail)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 2 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 42, -}, -{ - "cond: two branches returning same map pointers for lookup (tail, tail)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog2 = { 2, 5 }, - .result_unpriv = ACCEPT, - .result = ACCEPT, - .retval = 42, -}, diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c deleted file mode 100644 index 5a2e154dd1e0..000000000000 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ /dev/null @@ -1,1082 +0,0 @@ -{ - "reference tracking: leak potential reference", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: leak potential reference to sock_common", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = 
REJECT, -}, -{ - "reference tracking: leak potential reference on stack", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: leak potential reference on stack 2", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: zero potential reference", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: zero potential reference to sock_common", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: copy and zero potential references", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: acquire/release user key reference", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .fixup_kfunc_btf_id = { - { "bpf_lookup_user_key", 2 }, - { "bpf_key_put", 5 }, - }, - .result = ACCEPT, -}, -{ - "reference tracking: acquire/release system key reference", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .fixup_kfunc_btf_id = { - { "bpf_lookup_system_key", 1 }, - { "bpf_key_put", 4 }, - }, - .result = ACCEPT, -}, -{ - "reference tracking: release user key reference without check", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Possibly NULL pointer passed to trusted 
arg0", - .fixup_kfunc_btf_id = { - { "bpf_lookup_user_key", 2 }, - { "bpf_key_put", 4 }, - }, - .result = REJECT, -}, -{ - "reference tracking: release system key reference without check", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Possibly NULL pointer passed to trusted arg0", - .fixup_kfunc_btf_id = { - { "bpf_lookup_system_key", 1 }, - { "bpf_key_put", 3 }, - }, - .result = REJECT, -}, -{ - "reference tracking: release with NULL key pointer", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Possibly NULL pointer passed to trusted arg0", - .fixup_kfunc_btf_id = { - { "bpf_key_put", 1 }, - }, - .result = REJECT, -}, -{ - "reference tracking: leak potential reference to user key", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Unreleased reference", - .fixup_kfunc_btf_id = { - { "bpf_lookup_user_key", 2 }, - }, - .result = REJECT, -}, -{ - "reference tracking: leak potential reference to system key", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Unreleased reference", - .fixup_kfunc_btf_id = { - { "bpf_lookup_system_key", 1 }, - }, - .result = REJECT, -}, -{ - "reference tracking: release reference without check", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - /* reference in r0 may be NULL */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=sock_or_null expected=sock", - .result = REJECT, -}, -{ - "reference tracking: release reference to sock_common without check", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - /* reference in r0 may be NULL */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=sock_common_or_null expected=sock", - .result = REJECT, -}, -{ - "reference tracking: release reference", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference to sock_common", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = 
ACCEPT, -}, -{ - "reference tracking: release reference 2", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference twice", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=scalar expected=sock", - .result = REJECT, -}, -{ - "reference tracking: release reference twice inside branch", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */ - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=scalar expected=sock", - .result = REJECT, -}, -{ - "reference tracking: alloc, check, free in one subbranch", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), - /* if (offsetof(skb, mark) > data_len) exit; */ - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, - offsetof(struct __sk_buff, mark)), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */ - /* Leak reference in R0 */ - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: alloc, check, free in both subbranches", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), - /* if (offsetof(skb, mark) > data_len) exit; */ - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, - offsetof(struct __sk_buff, mark)), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? 
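Illustrative aside, not part of the patch: most ref_tracking.c cases above and below exercise one contract: bpf_sk_lookup_tcp() returns a referenced socket or NULL, and every verifier path must either prove it NULL or pass it to bpf_sk_release() exactly once. The accepted shape in restricted C; the tuple contents are irrelevant to verification:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int sk_ref(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)	/* releasing without this check: "type=sock_or_null expected=sock" */
		return 0;
	/* keeping sk without ever releasing it: "Unreleased reference" */
	bpf_sk_release(sk);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";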
*/ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking in call: free reference in subprog", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking in call: free reference in subprog and outside", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=scalar expected=sock", - .result = REJECT, -}, -{ - "reference tracking in call: alloc & leak reference in subprog", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_4), - BPF_SK_LOOKUP(sk_lookup_tcp), - /* spill unchecked sk_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking in call: alloc in subprog, release outside", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_EXIT_INSN(), /* return sk */ - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = POINTER_VALUE, - .result = ACCEPT, -}, -{ - "reference tracking in call: sk_ptr leak into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - /* spill unchecked sk_ptr into stack of caller */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking in call: sk_ptr spill into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP 
| BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), - /* spill unchecked sk_ptr into stack of caller */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* now the sk_ptr is verified, free the reference */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: allow LD_ABS", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: forbid LD_ABS while holding reference", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", - .result = REJECT, -}, -{ - "reference tracking: allow LD_IND", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, -}, -{ - "reference tracking: forbid LD_IND while holding reference", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", - .result = REJECT, -}, -{ - "reference tracking: check reference or tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 17 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference then tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - 
BPF_SK_LOOKUP(sk_lookup_tcp), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 18 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: leak possible reference over tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - /* Look up socket and store in REG_6 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - /* bpf_tail_call() */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 16 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "tail_call would lead to reference leak", - .result = REJECT, -}, -{ - "reference tracking: leak checked reference over tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - /* Look up socket and store in REG_6 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* if (!sk) goto end */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 17 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "tail_call would lead to reference leak", - .result = REJECT, -}, -{ - "reference tracking: mangle and release sock_or_null", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R1 pointer arithmetic on sock_or_null prohibited", - .result = REJECT, -}, -{ - "reference tracking: mangle and release sock", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R1 pointer arithmetic on sock prohibited", - .result = REJECT, -}, -{ - "reference tracking: access member", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: write to member", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LD_IMM64(BPF_REG_2, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2, - offsetof(struct bpf_sock, 
mark)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "cannot write into sock", - .result = REJECT, -}, -{ - "reference tracking: invalid 64-bit access of member", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "invalid sock access off=0 size=8", - .result = REJECT, -}, -{ - "reference tracking: access after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "!read_ok", - .result = REJECT, -}, -{ - "reference tracking: direct access for lookup", - .insns = { - /* Check that the packet is at least 64B long */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), - /* sk = sk_lookup_tcp(ctx, skb->data, ...) */ - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: use ptr from bpf_tcp_sock() after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use ptr from bpf_sk_fullsock() after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use ptr from bpf_sk_fullsock(tp) after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use sk after bpf_sk_release(tp)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_listener_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: bpf_sk_release(listen_sk)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_listener_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */ - "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, 
BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", -}, -{ - "reference tracking: branch tracking valid pointer null comparison", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: branch tracking valid pointer value comparison", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 1234, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: bpf_sk_release(btf_tcp_sock)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "unknown func", -}, -{ - "reference tracking: use ptr from bpf_skc_to_tcp_sock() after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .result_unpriv = REJECT, - .errstr_unpriv = "unknown func", -}, -{ - "reference tracking: try to leak released ptr reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - 
BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard), - BPF_MOV64_IMM(BPF_REG_0, 0), - - BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0), - BPF_EXIT_INSN() - }, - .fixup_map_array_48b = { 4 }, - .fixup_map_ringbuf = { 11 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R8 !read_ok" -}, diff --git a/tools/testing/selftests/bpf/verifier/regalloc.c b/tools/testing/selftests/bpf/verifier/regalloc.c deleted file mode 100644 index bb0dd89dd212..000000000000 --- a/tools/testing/selftests/bpf/verifier/regalloc.c +++ /dev/null @@ -1,277 +0,0 @@ -{ - "regalloc basic", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 4), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc negative", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 24, 4), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=48 off=48 size=1", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "regalloc src_reg mark", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 5), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc src_reg negative", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 22, 5), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=48 off=44 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc and spill", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 7), - /* r0 has upper bound that should propagate into r2 */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */ - BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2), - /* r3 has lower and upper bounds */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc and spill negative", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 48, 7), - /* r0 has upper bound that should propagate into r2 */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */ - BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2), - /* r3 has lower and upper bounds */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=48 off=48 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc three regs", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - 
BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 12, 5), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_4), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc after call", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 20, 4), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_9, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_8), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_9), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc in callee", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 20, 5), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc, spill, JEQ", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* spill r0 */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0), - /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 20, 0), - /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 with map_value */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), /* skip ldx if map_value == NULL */ - /* Buggy verifier will think that r3 == 20 here */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), /* read from map_value */ - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, diff --git 
a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c deleted file mode 100644 index 94c399d1faca..000000000000 --- a/tools/testing/selftests/bpf/verifier/runtime_jit.c +++ /dev/null @@ -1,231 +0,0 @@ -{ - "runtime/jit: tail_call within bounds, prog once", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 42, -}, -{ - "runtime/jit: tail_call within bounds, prog loop", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 41, -}, -{ - "runtime/jit: tail_call within bounds, no prog", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "runtime/jit: tail_call within bounds, key 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 2 / key 2, first branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 13), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 2 / key 2, second branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 14), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 0 / key 2, first branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 13), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 0 
/ key 2, second branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 14), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 42, -}, -{ - "runtime/jit: tail_call within bounds, different maps, first branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 13), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 9 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 1, -}, -{ - "runtime/jit: tail_call within bounds, different maps, second branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 14), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 9 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 42, -}, -{ - "runtime/jit: tail_call out of bounds", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 256), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 2, -}, -{ - "runtime/jit: pass negative index to tail_call", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 2, -}, -{ - "runtime/jit: pass > 32bit index to tail_call", - .insns = { - BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 2 }, - .result = ACCEPT, - .retval = 42, - /* Verifier rewrite for unpriv skips tail call here. 
*/ - .retval_unpriv = 2, -}, diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c deleted file mode 100644 index 745d6b5842fd..000000000000 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ /dev/null @@ -1,266 +0,0 @@ -{ - "pointer/scalar confusion in state equality check (way 1)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .retval = POINTER_VALUE, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr as return value" -}, -{ - "pointer/scalar confusion in state equality check (way 2)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_JMP_A(1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .retval = POINTER_VALUE, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr as return value" -}, -{ - "liveness pruning and write screening", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* branch conditions teach us nothing about R2 */ - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "varlen_map_value_access pruning", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "search pruning: all branches should be verified (nop operation)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_A(1), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, 
BPF_REG_10, -16), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R6 invalid mem access 'scalar'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "search pruning: all branches should be verified (invalid stack access)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), - BPF_JMP_A(1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "invalid read from stack off -16+0 size 8", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "precision tracking for u32 spill/fill", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV32_IMM(BPF_REG_6, 32), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV32_IMM(BPF_REG_6, 4), - /* Additional insns to introduce a pruning point. */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - /* u32 spill/fill */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8), - /* out-of-bound map value access for r6=32 */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 15 }, - .result = REJECT, - .errstr = "R0 min value is outside of the allowed memory range", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "precision tracking for u32 spills, u64 fill", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV32_IMM(BPF_REG_7, 0xffffffff), - /* Additional insns to introduce a pruning point. */ - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), - /* u32 spills, u64 fill */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8), - /* if r8 != X goto pc+1 r8 known in fallthrough branch */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - /* if r8 == X goto pc+1 condition always true on first - * traversal, so starts backtracking to mark r8 as requiring - * precision. r7 marked as needing precision. r6 not marked - * since it's not tracked. 
- */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1), - /* fails if r8 correctly marked unknown after fill. */ - BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "div by zero", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "allocated_stack", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8), - BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9), - BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = ACCEPT, - .insn_processed = 15, -}, -/* The test performs a conditional 64-bit write to a stack location - * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], - * then data is read from fp[-8]. This sequence is unsafe. - * - * The test would be mistakenly marked as safe w/o dst register parent - * preservation in verifier.c:copy_register_state() function. - * - * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the - * checkpoint state after conditional 64-bit assignment. - */ -{ - "write tracking and register parent chain bug", - .insns = { - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* r0 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - /* if r0 > r6 goto +1 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1), - /* *(u64 *)(r10 - 8) = 0xdeadbeef */ - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef), - /* r1 = 42 */ - BPF_MOV64_IMM(BPF_REG_1, 42), - /* *(u8 *)(r10 - 8) = r1 */ - BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8), - /* r2 = *(u64 *)(r10 - 8) */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8), - /* exit(0) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .flags = BPF_F_TEST_STATE_FREQ, - .errstr_unpriv = "invalid read from stack off -8+1 size 8", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c deleted file mode 100644 index 108dd3ee1edd..000000000000 --- a/tools/testing/selftests/bpf/verifier/sock.c +++ /dev/null @@ -1,706 +0,0 @@ -{ - "skb->sk: no NULL check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid mem access 'sock_common_or_null'", -}, -{ - "skb->sk: sk->family [non fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "skb->sk: sk->type [fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 
offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock_common access", -}, -{ - "bpf_sk_fullsock(skb->sk): no !skb->sk check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "type=sock_common_or_null expected=sock_common", -}, -{ - "sk_fullsock(skb->sk): no NULL check on ret", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid mem access 'sock_or_null'", -}, -{ - "sk_fullsock(skb->sk): sk->type [fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->family [non fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->state [narrow load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [half 
load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [byte load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = 
BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->type [narrow load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->protocol [narrow load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): beyond last field", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, rx_queue_mapping)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "bpf_tcp_sock(skb->sk): no !skb->sk check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "type=sock_common_or_null expected=sock_common", -}, -{ - "bpf_tcp_sock(skb->sk): no NULL check on ret", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid mem access 'tcp_sock_or_null'", -}, -{ - "bpf_tcp_sock(skb->sk): tp->snd_cwnd", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "bpf_tcp_sock(skb->sk): tp->bytes_acked", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, 
BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "bpf_tcp_sock(skb->sk): beyond last field", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid tcp_sock access", -}, -{ - "bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "bpf_sk_release(skb->sk)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - "bpf_sk_release(bpf_sk_fullsock(skb->sk))", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - "bpf_sk_release(bpf_tcp_sock(skb->sk))", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - "sk_storage_get(map, skb->sk, NULL, 0): value == NULL", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_storage_get), - 
BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 11 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "sk_storage_get(map, skb->sk, 1, 1): value == 1", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_storage_get), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 11 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R3 type=scalar expected=fp", -}, -{ - "sk_storage_get(map, skb->sk, &stack_value, 1): stack_value", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_storage_get), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 14 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "bpf_map_lookup_elem(smap, &key)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 3 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem", -}, -{ - "bpf_map_lookup_elem(xskmap, &key); xs->queue_id", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_xdp_sock, queue_id)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_xskmap = { 3 }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, -}, -{ - "bpf_map_lookup_elem(sockmap, &key)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = REJECT, - .errstr = "Unreleased reference id=2 alloc_insn=5", -}, -{ - "bpf_map_lookup_elem(sockhash, &key)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_sockhash = { 3 }, - .prog_type = 
BPF_PROG_TYPE_SK_SKB, - .result = REJECT, - .errstr = "Unreleased reference id=2 alloc_insn=5", -}, -{ - "bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = ACCEPT, -}, -{ - "bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_map_sockhash = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = ACCEPT, -}, -{ - "bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)", - .insns = { - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport), - BPF_EXIT_INSN(), - }, - .fixup_map_reuseport_array = { 4 }, - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - .result = ACCEPT, -}, -{ - "bpf_sk_select_reuseport(ctx, sockmap, &key, flags)", - .insns = { - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 4 }, - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - .result = ACCEPT, -}, -{ - "bpf_sk_select_reuseport(ctx, sockhash, &key, flags)", - .insns = { - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 4 }, - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - .result = ACCEPT, -}, -{ - "mark null check on return value of bpf_skc_to helpers", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_request_sock), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .result_unpriv = REJECT, - .errstr_unpriv = "unknown func", -}, diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c 
b/tools/testing/selftests/bpf/verifier/spin_lock.c deleted file mode 100644 index eaf114f07e2e..000000000000 --- a/tools/testing/selftests/bpf/verifier/spin_lock.c +++ /dev/null @@ -1,447 +0,0 @@ -{ - "spin_lock: test1 success", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test2 direct ld/st", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "cannot be accessed directly", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test3 direct ld/st", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "cannot be accessed directly", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "spin_lock: test4 direct ld/st", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 
0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 3), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "cannot be accessed directly", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "spin_lock: test5 call within a locked region", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "calls are not allowed", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test6 missing unlock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "unlock is missing", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test7 unlock without lock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "without taking a lock", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test8 double lock", - .insns = { - 
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "calls are not allowed", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test9 different lock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3, 11 }, - .result = REJECT, - .errstr = "unlock of different lock", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test10 lock in subprog without unlock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "unlock is missing", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test11 ld_abs under lock", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_LD_ABS(BPF_B, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 4 }, - .result = REJECT, - .errstr = "inside bpf_spin_lock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "spin_lock: regsafe compare reg->id for map value", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_6, offsetof(struct __sk_buff, mark)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 2 }, - .result = REJECT, - .errstr = "bpf_spin_unlock of different lock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = BPF_F_TEST_STATE_FREQ, -}, -/* Make sure that regsafe() compares ids for spin lock records using - * check_ids(): - * 1: r9 = map_lookup_elem(...) ; r9.id == 1 - * 2: r8 = map_lookup_elem(...) ; r8.id == 2 - * 3: r7 = ktime_get_ns() - * 4: r6 = ktime_get_ns() - * 5: if r6 > r7 goto <9> - * 6: spin_lock(r8) - * 7: r9 = r8 - * 8: goto <10> - * 9: spin_lock(r9) - * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active, - * ; second visit to (10) should be considered safe - * ; if check_ids() is used. - * 11: exit(0) - */ -{ - "spin_lock: regsafe() check_ids() similar id mappings", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - /* r9 = map_lookup_elem(...) */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 24), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - /* r8 = map_lookup_elem(...) 
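The spin_lock.c tests deleted above spell out the locking discipline the verifier enforces: the struct bpf_spin_lock must live inside a map value, every bpf_spin_lock() needs a matching bpf_spin_unlock() on the same lock along every path, and only plain loads and stores are allowed while the lock is held. A hedged sketch of a well-formed user of that discipline in BPF C (map layout and names are assumptions for illustration, not taken from this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val {
	int cnt;
	struct bpf_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct val);
} counter_map SEC(".maps");

SEC("cgroup_skb/ingress")
int bump_counter(struct __sk_buff *skb)
{
	int key = 0;
	struct val *v = bpf_map_lookup_elem(&counter_map, &key);

	if (!v)
		return 1;
	bpf_spin_lock(&v->lock);	/* critical section: no helper calls here */
	v->cnt++;			/* plain loads/stores only */
	bpf_spin_unlock(&v->lock);	/* must pair with the lock on every path */
	return 1;
}

char _license[] SEC("license") = "GPL";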
*/ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 18), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - /* r7 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* if r6 > r7 goto +5 ; no new information about the state is derived from - * ; this check, thus produced verifier states differ - * ; only in 'insn_idx' - * spin_lock(r8) - * r9 = r8 - * goto unlock - */ - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_8), - BPF_JMP_A(3), - /* spin_lock(r9) */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - /* spin_unlock(r9) */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_EMIT_CALL(BPF_FUNC_spin_unlock), - /* exit(0) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3, 10 }, - .result = VERBOSE_ACCEPT, - .errstr = "28: safe", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = BPF_F_TEST_STATE_FREQ, -}, diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c deleted file mode 100644 index 4c4133c80440..000000000000 --- a/tools/testing/selftests/bpf/verifier/subreg.c +++ /dev/null @@ -1,533 +0,0 @@ -/* This file contains sub-register zero extension checks for insns defining - * sub-registers, meaning: - * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width - * forms (BPF_END) could define sub-registers. - * - Narrow direct loads, BPF_B/H/W | BPF_LDX. - * - BPF_LD is not exposed to JIT back-ends, so no need for testing. - * - * "get_prandom_u32" is used to initialize low 32-bit of some registers to - * prevent potential optimizations done by verifier or JIT back-ends which could - * optimize register back into constant when range info shows one register is a - * constant. - */ -{ - "add32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), - BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "add32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - /* An insn could have no effect on the low 32-bit, for example: - * a = a + 0 - * a = a | 0 - * a = a & -1 - * But, they should still zero high 32-bit. 
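The file comment just quoted states the invariant these subreg.c cases verify: any instruction that writes a sub-register (BPF_ALU32 ops, BPF_END, narrow BPF_LDX) must clear the upper 32 bits of the destination, even when the operation leaves the low 32 bits unchanged (a = a + 0, a = a | 0, a = a & -1). Each deleted test forces garbage into the high half, runs one such instruction, shifts right by 32 and expects zero. The same check expressed as ordinary user-space C on a u64, purely for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* garbage in the high 32 bits, as the tests set up via OR 0x1000000000 */
	uint64_t r0 = 0x1000000000ULL | 42;

	/* 32-bit add of 0: low half unchanged, high half must become zero */
	r0 = (uint32_t)((uint32_t)r0 + 0);

	assert((r0 >> 32) == 0);	/* mirrors the final BPF_RSH by 32 */
	return 0;
}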
- */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "sub32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL), - BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "sub32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mul32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL), - BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mul32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "div32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, -1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "div32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result 
= ACCEPT, - .retval = 0, -}, -{ - "or32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL), - BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "or32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "and32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL), - BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "and32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "lsh32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "lsh32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "rsh32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, 
BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "rsh32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "neg32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mod32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, -1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mod32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "xor32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), - BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "xor32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mov32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), - BPF_MOV32_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mov32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - 
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "arsh32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "arsh32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end16 (to_le) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end32 (to_le) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end16 (to_be) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end32 (to_be) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "ldx_b zero extend check", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 
0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "ldx_h zero extend check", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "ldx_w zero extend check", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c deleted file mode 100644 index af0c0f336625..000000000000 --- a/tools/testing/selftests/bpf/verifier/unpriv.c +++ /dev/null @@ -1,562 +0,0 @@ -{ - "unpriv: return pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr", - .retval = POINTER_VALUE, -}, -{ - "unpriv: add const to pointer", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, -}, -{ - "unpriv: add pointer to pointer", - .insns = { - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 pointer += pointer", -}, -{ - "unpriv: neg pointer", - .insns = { - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer arithmetic", -}, -{ - "unpriv: cmp pointer with const", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer comparison", -}, -{ - "unpriv: cmp pointer with pointer", - .insns = { - BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R10 pointer comparison", -}, -{ - "unpriv: check that printk is disallowed", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "unknown func bpf_trace_printk#6", - .result_unpriv = REJECT, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "unpriv: pass pointer to helper function", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - 
BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R4 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: indirectly pass pointer on stack to helper function", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: mangle pointer on stack 1", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: mangle pointer on stack 2", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: read pointer from stack in small chunks", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid size", - .result = REJECT, -}, -{ - "unpriv: write pointer into ctx", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 leaks addr", - .result_unpriv = REJECT, - .errstr = "invalid bpf_context access", - .result = REJECT, -}, -{ - "unpriv: spill/fill of ctx", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, -}, -{ - "unpriv: spill/fill of ctx 2", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of ctx 3", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=fp expected=ctx", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of ctx 4", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW, - BPF_REG_10, BPF_REG_0, -8, 
BPF_ADD), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=scalar expected=ctx", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - /* Same as above, but use BPF_ST_MEM to save 42 - * instead of BPF_STX_MEM. - */ - "unpriv: spill/fill of different pointers st", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - ctx and sock", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb == NULL) *target = sock; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* else *target = skb; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* struct __sk_buff *skb = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* skb->mark = 42; */ - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - /* if (sk) bpf_sk_release(sk) */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "type=ctx expected=sock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - leak sock", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb == NULL) *target = sock; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* else *target = skb; 
*/ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* struct __sk_buff *skb = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* skb->mark = 42; */ - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - //.errstr = "same insn cannot be used with different pointers", - .errstr = "Unreleased reference", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - sock and ctx (read)", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb) *target = skb */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* else *target = sock */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* struct bpf_sock *sk = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct bpf_sock, mark)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - sock and ctx (write)", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb) *target = skb */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* else *target = sock */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* struct bpf_sock *sk = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* if (sk) sk->mark = 42; bpf_sk_release(sk); */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct bpf_sock, mark)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - //.errstr = "same insn cannot be used with different pointers", - .errstr = "cannot write into sock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers ldx", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, - -(__s32)offsetof(struct bpf_perf_event_data, - sample_period) - 8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, 
- .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_PERF_EVENT, -}, -{ - "unpriv: write pointer into map elem value", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "alu32: mov u32 const", - .insns = { - BPF_MOV32_IMM(BPF_REG_7, 0), - BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1), - BPF_MOV32_REG(BPF_REG_0, BPF_REG_7), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R7 invalid mem access 'scalar'", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 0, -}, -{ - "unpriv: partial copy of pointer", - .insns = { - BPF_MOV32_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 partial copy", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: pass pointer to tail_call", - .insns = { - BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .errstr_unpriv = "R3 leaks addr into helper", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: cmp map pointer with zero", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: write into frame pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_10, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "frame pointer is read only", - .result = REJECT, -}, -{ - "unpriv: spill/fill frame pointer", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "frame pointer is read only", - .result = REJECT, -}, -{ - "unpriv: cmp of frame pointer", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: adding of fp, reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: adding of fp, imm", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: cmp of stack pointer", - .insns = { - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R2 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, -}, diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c deleted file mode 100644 index d6f29eb4bd57..000000000000 --- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c +++ /dev/null @@ -1,95 +0,0 @@ -{ - "map element value illegal alu op, 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 bitwise operator &= on pointer", - .result = REJECT, -}, -{ - "map element value illegal alu op, 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 32-bit pointer arithmetic prohibited", - .result = REJECT, -}, -{ - "map element value illegal alu op, 3", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 pointer arithmetic with /= operator", - .result = REJECT, -}, -{ - "map element value illegal alu op, 4", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "invalid mem access 'scalar'", - .result = REJECT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map element value illegal alu op, 5", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_3, 4096), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "leaking pointer from stack off -8", - .errstr = "R0 invalid mem access 'scalar'", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, diff --git 
a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c deleted file mode 100644 index 249187d3c530..000000000000 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ /dev/null @@ -1,1140 +0,0 @@ -{ - "map access: known scalar += value_ptr unknown vs const", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr const vs unknown", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr const vs const (ne)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 
tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr const vs const (eq)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr unknown vs unknown (eq)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr unknown vs unknown (lt)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr unknown vs unknown (gt)", - .insns = { - BPF_LDX_MEM(BPF_W, 
BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr from different maps", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr -= known scalar from different maps", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .retval = 1, -}, -{ - "map access: known scalar += value_ptr from different maps, but same value properties", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, 
BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: mixing value pointer and scalar, 1", - .insns = { - // load map value pointer into r0 and r2 - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - // load some number from the map into r1 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), - // branch A - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_A(2), - // branch B - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - // common instruction - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - // branch A - BPF_JMP_A(4), - // branch B - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), - // verifier follows fall-through - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - // fake-dead code; targeted from branch A to - // prevent dead code sanitization - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R2 pointer comparison prohibited", - .retval = 0, -}, -{ - "map access: mixing value pointer and scalar, 2", - .insns = { - // load map value pointer into r0 and r2 - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - // load some number from the map into r1 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - // branch A - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - BPF_JMP_A(2), - // branch B - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - // common instruction - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - // branch A - BPF_JMP_A(4), - // branch B - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), - // verifier follows fall-through - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - // fake-dead code; targeted from branch A to - // prevent dead code sanitization, rejected - // via branch B however - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .retval = 0, -}, -{ - "sanitation: alu with different scalars 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 
0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - BPF_JMP_A(2), - BPF_MOV64_IMM(BPF_REG_2, 42), - BPF_MOV64_IMM(BPF_REG_3, 0x100001), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .retval = 0x100000, -}, -{ - "sanitation: alu with different scalars 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_delete_elem), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_EMIT_CALL(BPF_FUNC_map_delete_elem), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .retval = -EINVAL * 2, -}, -{ - "sanitation: alu with different scalars 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, EINVAL), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, EINVAL), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = -EINVAL * 2, -}, -{ - "map access: value_ptr += known scalar, upper oob arith, test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, upper oob arith, test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, upper oob arith, test 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - 
BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr -= known scalar, lower oob arith, test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", -}, -{ - "map access: value_ptr -= known scalar, lower oob arith, test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr -= known scalar, lower oob arith, test 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", -}, -{ - "map access: value_ptr += known scalar, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", -}, -{ - "map access: value_ptr += known scalar, 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, -2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, 5", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, -}, -{ - "map access: value_ptr += known scalar, 6", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, -}, -{ - "map access: value_ptr += N, value_ptr -= N known 
scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV32_IMM(BPF_REG_1, 0x12345678), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0x12345678, -}, -{ - "map access: unknown scalar += value_ptr, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: unknown scalar += value_ptr, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: unknown scalar += value_ptr, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 0xabcdef12, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: unknown scalar += value_ptr, 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_1, 19), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 
max value is outside of the allowed memory range", - .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: value_ptr += unknown scalar, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += unknown scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: value_ptr += unknown scalar, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1), - BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 pointer += pointer prohibited", -}, -{ - "map access: known scalar -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = 
"R1 tried to subtract pointer from scalar", -}, -{ - "map access: value_ptr -= known scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is outside of the allowed memory range", -}, -{ - "map access: value_ptr -= known scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: unknown scalar -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 tried to subtract pointer from scalar", -}, -{ - "map access: value_ptr -= unknown scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is negative", -}, -{ - "map access: value_ptr -= unknown scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value 
goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 invalid mem access 'scalar'", - .errstr_unpriv = "R0 pointer -= pointer prohibited", -}, -{ - "map access: trying to leak tainted dst reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF), - BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 4 }, - .result = REJECT, - .errstr = "math between map_value pointer and 4294967295 is not allowed", -}, -{ - "32bit pkt_ptr -= scalar", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), - BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_7), - BPF_ALU32_REG(BPF_SUB, BPF_REG_6, BPF_REG_4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "32bit scalar -= pkt_ptr", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), - BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_6), - BPF_ALU32_REG(BPF_SUB, BPF_REG_4, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 5a9691e942de..f144d0604ddf 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -77,6 +77,7 @@ #include <linux/if_link.h> #include <linux/if_ether.h> #include <linux/ip.h> +#include <linux/mman.h> #include <linux/udp.h> #include <arpa/inet.h> #include <net/if.h> @@ -1290,7 +1291,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) int ret; if (ifobject->umem->unaligned_mode) - mmap_flags |= MAP_HUGETLB; + mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB; if (ifobject->shared_umem) umem_sz *= 2; @@ -1379,6 +1380,11 @@ static void *worker_testapp_validate_rx(void *arg) pthread_exit(NULL); } +static u64 ceil_u64(u64 a, u64 b) +{ + return (a + b - 1) / b; +} + static void testapp_clean_xsk_umem(struct ifobject *ifobj) { 
 	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
@@ -1386,6 +1392,7 @@ static void testapp_clean_xsk_umem(struct ifobject *ifobj)
 	if (ifobj->shared_umem)
 		umem_sz *= 2;
+	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
 	xsk_umem__delete(ifobj->umem->umem);
 	munmap(ifobj->umem->buffer, umem_sz);
 }
@@ -1619,14 +1626,15 @@ static void testapp_stats_fill_empty(struct test_spec *test)
 /* Simple test */
 static bool hugepages_present(struct ifobject *ifobject)
 {
-	const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
+	size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
 	void *bufs;
 	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
-		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
 	if (bufs == MAP_FAILED)
 		return false;
+	mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
 	munmap(bufs, mmap_sz);
 	return true;
 }
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 919327807a4e..c535aeab2ca3 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -56,6 +56,7 @@
 #define RX_FULL_RXQSIZE 32
 #define UMEM_HEADROOM_TEST_SIZE 128
 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
+#define HUGEPAGE_SIZE (2 * 1024 * 1024)
 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
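
The verifier test files deleted in this diff (unpriv.c, value_illegal_alu.c, value_ptr_arith.c) all use the test_verifier table format: each entry is one struct bpf_test with a raw eBPF instruction array and the expected verdicts for privileged and unprivileged loads. The annotated entry below is a minimal sketch of that format, modelled on the "unpriv: write pointer into map elem value" case removed above; the comments are editorial and the entry is illustrative, not an addition to the selftests.

{
	"example: unpriv leak of map value pointer",	/* name printed by test_verifier */
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),		/* zero a stack slot to use as the key */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),		/* r2 = &key */
	BPF_LD_MAP_FD(BPF_REG_1, 0),			/* map fd is patched in via .fixup_* below */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),		/* skip the store on a NULL lookup */
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),	/* write the value pointer into the map */
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },			/* instruction index that receives the map fd */
	.errstr_unpriv = "R0 leaks addr",		/* expected verifier message for unpriv */
	.result_unpriv = REJECT,
	.result = ACCEPT,				/* privileged loads may store the pointer */
},

The xskxceiver.c and xskxceiver.h hunks back the UMEM with explicit 2 MB huge pages (MAP_HUGETLB | MAP_HUGE_2MB) and round the munmap() length up to HUGEPAGE_SIZE with ceil_u64(), since a hugetlb mapping is only released in whole huge pages. The following stand-alone sketch shows the same pattern outside the selftest; the 5 MiB buffer size and the main() wrapper are assumptions for illustration only.

#include <linux/mman.h>		/* MAP_HUGE_2MB */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define HUGEPAGE_SIZE (2 * 1024 * 1024)

static uint64_t ceil_u64(uint64_t a, uint64_t b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	uint64_t len = 5 * 1024 * 1024;		/* arbitrary buffer size, not hugepage aligned */
	size_t map_len = ceil_u64(len, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;	/* rounded up to 6 MiB */
	void *buf;

	/* Ask for 2 MB huge pages explicitly so the HUGEPAGE_SIZE math stays valid
	 * even if the system default huge page size differs. */
	buf = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");			/* e.g. no 2 MB huge pages reserved */
		return 1;
	}

	/* ... use buf ... */

	munmap(buf, map_len);			/* length must cover whole huge pages */
	return 0;
}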