Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/Makefile            |    1
-rw-r--r--  kernel/bpf/arraymap.c          |   18
-rw-r--r--  kernel/bpf/cgroup.c            |   94
-rw-r--r--  kernel/bpf/core.c              |   46
-rw-r--r--  kernel/bpf/cpumap.c            |   12
-rw-r--r--  kernel/bpf/devmap.c            |   16
-rw-r--r--  kernel/bpf/hashtab.c           |   14
-rw-r--r--  kernel/bpf/local_storage.c     |   13
-rw-r--r--  kernel/bpf/lpm_trie.c          |    8
-rw-r--r--  kernel/bpf/queue_stack_maps.c  |   13
-rw-r--r--  kernel/bpf/reuseport_array.c   |   17
-rw-r--r--  kernel/bpf/stackmap.c          |   28
-rw-r--r--  kernel/bpf/syscall.c           |  103
-rw-r--r--  kernel/bpf/verifier.c          | 1175
-rw-r--r--  kernel/bpf/xskmap.c            |   19
-rw-r--r--  kernel/cgroup/cgroup.c         |   11
-rw-r--r--  kernel/trace/bpf_trace.c       |   96

17 files changed, 1390 insertions, 294 deletions
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 4c2fa3ac56f6..29d781061cd5 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y := core.o
+CFLAGS_core.o += $(call cc-disable-warning, override-init)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 262a321f58a6..1c65ce0098a9 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -75,6 +75,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
u32 elem_size, index_mask, max_entries;
bool unpriv = !capable(CAP_SYS_ADMIN);
u64 cost, array_size, mask64;
+ struct bpf_map_memory mem;
struct bpf_array *array;
elem_size = round_up(attr->value_size, 8);
@@ -108,32 +109,29 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
/* make sure there is no u32 overflow later in round_up() */
cost = array_size;
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-ENOMEM);
- if (percpu) {
+ if (percpu)
cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-ENOMEM);
- }
- cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- ret = bpf_map_precharge_memlock(cost);
+ ret = bpf_map_charge_init(&mem, cost);
if (ret < 0)
return ERR_PTR(ret);
/* allocate all map elements and zero-initialize them */
array = bpf_map_area_alloc(array_size, numa_node);
- if (!array)
+ if (!array) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
+ }
array->index_mask = index_mask;
array->map.unpriv_array = unpriv;
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
- array->map.pages = cost;
+ bpf_map_charge_move(&array->map.memory, &mem);
array->elem_size = elem_size;
if (percpu && bpf_array_alloc_percpu(array)) {
+ bpf_map_charge_finish(&array->map.memory);
bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
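
The arraymap conversion above is the template every other map type in this series follows: charge the memlock budget up front with bpf_map_charge_init(), allocate, then hand the charge over to the map with bpf_map_charge_move(), calling bpf_map_charge_finish() on every failure path so the charge is dropped exactly once. The userspace sketch below models only that ownership flow; the type and helper names echo the kernel API, but the page size, limit and accounting are simplified stand-ins, not the kernel implementation.

/* Toy model of the charge-init / allocate / charge-move pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct map_memory { unsigned long pages; };	/* models struct bpf_map_memory */
struct toy_map { struct map_memory memory; void *data; };

static unsigned long charged;			/* models user->locked_vm */

static int charge_init(struct map_memory *mem, size_t size)
{
	unsigned long pages = (size + 4095) / 4096;	/* round up to pages */

	if (charged + pages > 1024)		/* stand-in memlock limit */
		return -1;
	charged += pages;
	mem->pages = pages;
	return 0;
}

static void charge_finish(struct map_memory *mem)
{
	charged -= mem->pages;			/* undo the charge */
	mem->pages = 0;
}

static void charge_move(struct map_memory *dst, struct map_memory *src)
{
	*dst = *src;				/* the map owns the charge now */
	memset(src, 0, sizeof(*src));		/* avoid a double uncharge */
}

static struct toy_map *toy_map_alloc(size_t size)
{
	struct map_memory mem;
	struct toy_map *map;

	if (charge_init(&mem, size))		/* 1. charge before allocating */
		return NULL;
	map = calloc(1, sizeof(*map));
	if (!map) {
		charge_finish(&mem);		/* 2. drop the charge on failure */
		return NULL;
	}
	map->data = malloc(size);
	if (!map->data) {
		charge_finish(&mem);
		free(map);
		return NULL;
	}
	charge_move(&map->memory, &mem);	/* 3. transfer the charge to the map */
	return map;
}

int main(void)
{
	struct toy_map *m = toy_map_alloc(8192);

	printf("pages charged: %lu\n", m ? m->memory.pages : 0UL);
	return 0;
}

Keeping a single owner of the charge at any point is also what lets the bpf_map_free_deferred() change further down move the charge out of the map before map_free() and uncharge it last.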
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 92a7d0cf8d13..c225c42e114a 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -19,13 +19,23 @@
DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+void cgroup_bpf_offline(struct cgroup *cgrp)
+{
+ cgroup_get(cgrp);
+ percpu_ref_kill(&cgrp->bpf.refcnt);
+}
+
/**
- * cgroup_bpf_put() - put references of all bpf programs
- * @cgrp: the cgroup to modify
+ * cgroup_bpf_release() - put references of all bpf programs and
+ * release all cgroup bpf data
+ * @work: work structure embedded into the cgroup to modify
*/
-void cgroup_bpf_put(struct cgroup *cgrp)
+static void cgroup_bpf_release(struct work_struct *work)
{
+ struct cgroup *cgrp = container_of(work, struct cgroup,
+ bpf.release_work);
enum bpf_cgroup_storage_type stype;
+ struct bpf_prog_array *old_array;
unsigned int type;
for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
@@ -42,8 +52,27 @@ void cgroup_bpf_put(struct cgroup *cgrp)
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
}
- bpf_prog_array_free(cgrp->bpf.effective[type]);
+ old_array = rcu_dereference_protected(
+ cgrp->bpf.effective[type],
+ percpu_ref_is_dying(&cgrp->bpf.refcnt));
+ bpf_prog_array_free(old_array);
}
+
+ percpu_ref_exit(&cgrp->bpf.refcnt);
+ cgroup_put(cgrp);
+}
+
+/**
+ * cgroup_bpf_release_fn() - callback used to schedule releasing
+ * of bpf cgroup data
+ * @ref: percpu ref counter structure
+ */
+static void cgroup_bpf_release_fn(struct percpu_ref *ref)
+{
+ struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
+
+ INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
+ queue_work(system_wq, &cgrp->bpf.release_work);
}
/* count number of elements in the list.
@@ -98,7 +127,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp,
*/
static int compute_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
- struct bpf_prog_array __rcu **array)
+ struct bpf_prog_array **array)
{
enum bpf_cgroup_storage_type stype;
struct bpf_prog_array *progs;
@@ -136,17 +165,16 @@ static int compute_effective_progs(struct cgroup *cgrp,
}
} while ((p = cgroup_parent(p)));
- rcu_assign_pointer(*array, progs);
+ *array = progs;
return 0;
}
static void activate_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
- struct bpf_prog_array __rcu *array)
+ struct bpf_prog_array *old_array)
{
- struct bpf_prog_array __rcu *old_array;
-
- old_array = xchg(&cgrp->bpf.effective[type], array);
+ rcu_swap_protected(cgrp->bpf.effective[type], old_array,
+ lockdep_is_held(&cgroup_mutex));
/* free prog array after grace period, since __cgroup_bpf_run_*()
* might be still walking the array
*/
@@ -163,8 +191,13 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
* that array below is variable length
*/
#define NR ARRAY_SIZE(cgrp->bpf.effective)
- struct bpf_prog_array __rcu *arrays[NR] = {};
- int i;
+ struct bpf_prog_array *arrays[NR] = {};
+ int ret, i;
+
+ ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
for (i = 0; i < NR; i++)
INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
@@ -180,6 +213,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
cleanup:
for (i = 0; i < NR; i++)
bpf_prog_array_free(arrays[i]);
+
+ percpu_ref_exit(&cgrp->bpf.refcnt);
+
return -ENOMEM;
}
@@ -441,10 +477,14 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
enum bpf_attach_type type = attr->query.attach_type;
struct list_head *progs = &cgrp->bpf.progs[type];
u32 flags = cgrp->bpf.flags[type];
+ struct bpf_prog_array *effective;
int cnt, ret = 0, i;
+ effective = rcu_dereference_protected(cgrp->bpf.effective[type],
+ lockdep_is_held(&cgroup_mutex));
+
if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
- cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
+ cnt = bpf_prog_array_length(effective);
else
cnt = prog_list_length(progs);
@@ -461,8 +501,7 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
}
if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
- return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
- prog_ids, cnt);
+ return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
} else {
struct bpf_prog_list *pl;
u32 id;
@@ -545,8 +584,16 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
* The program type passed in via @type must be suitable for network
* filtering. No further check is performed to assert that.
*
- * This function will return %-EPERM if any if an attached program was found
- * and if it returned != 1 during execution. In all other cases, 0 is returned.
+ * For egress packets, this function can return:
+ * NET_XMIT_SUCCESS (0) - continue with packet output
+ * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr
+ * NET_XMIT_CN (2) - continue with packet output and notify TCP
+ * to call cwr
+ * -EPERM - drop packet
+ *
+ * For ingress packets, this function will return -EPERM if any
+ * attached program was found and if it returned != 1 during execution.
+ * Otherwise 0 is returned.
*/
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
@@ -572,12 +619,19 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
/* compute pointers for the bpf prog */
bpf_compute_and_save_data_end(skb, &saved_data_end);
- ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
- __bpf_prog_run_save_cb);
+ if (type == BPF_CGROUP_INET_EGRESS) {
+ ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
+ cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
+ } else {
+ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
+ __bpf_prog_run_save_cb);
+ ret = (ret == 1 ? 0 : -EPERM);
+ }
bpf_restore_data_end(skb, saved_data_end);
__skb_pull(skb, offset);
skb->sk = save_sk;
- return ret == 1 ? 0 : -EPERM;
+
+ return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
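
Only the ingress path keeps the old folding where a prog-array result of 1 means "pass" and anything else becomes -EPERM; for egress the aggregate verdict from BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY is now returned as-is so the stack can see NET_XMIT_DROP and NET_XMIT_CN. A small hedged model of the two mappings, with the NET_XMIT_* values assumed to match the usual kernel header definitions:

/* Toy model of the ingress vs egress verdict handling described above. */
#include <stdio.h>
#include <errno.h>

#define NET_XMIT_SUCCESS	0x00	/* continue with packet output */
#define NET_XMIT_DROP		0x01	/* drop packet, notify TCP to call cwr */
#define NET_XMIT_CN		0x02	/* continue output, notify TCP to call cwr */

/* Ingress keeps the historical mapping: 1 passes, everything else drops. */
static int ingress_verdict(int run_array_ret)
{
	return run_array_ret == 1 ? 0 : -EPERM;
}

int main(void)
{
	printf("ingress ret=1 -> %d (pass)\n", ingress_verdict(1));
	printf("ingress ret=0 -> %d (drop)\n", ingress_verdict(0));

	/* Egress: __cgroup_bpf_run_filter_skb() returns these codes directly. */
	printf("egress codes: SUCCESS=%d DROP=%d CN=%d (or -EPERM=%d to drop)\n",
	       NET_XMIT_SUCCESS, NET_XMIT_DROP, NET_XMIT_CN, -EPERM);
	return 0;
}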
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 080e2bb644cc..ad3be85f1411 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1791,38 +1791,33 @@ struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
return &empty_prog_array.hdr;
}
-void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
+void bpf_prog_array_free(struct bpf_prog_array *progs)
{
- if (!progs ||
- progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
+ if (!progs || progs == &empty_prog_array.hdr)
return;
kfree_rcu(progs, rcu);
}
-int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
+int bpf_prog_array_length(struct bpf_prog_array *array)
{
struct bpf_prog_array_item *item;
u32 cnt = 0;
- rcu_read_lock();
- item = rcu_dereference(array)->items;
- for (; item->prog; item++)
+ for (item = array->items; item->prog; item++)
if (item->prog != &dummy_bpf_prog.prog)
cnt++;
- rcu_read_unlock();
return cnt;
}
-static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
+static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
u32 *prog_ids,
u32 request_cnt)
{
struct bpf_prog_array_item *item;
int i = 0;
- item = rcu_dereference_check(array, 1)->items;
- for (; item->prog; item++) {
+ for (item = array->items; item->prog; item++) {
if (item->prog == &dummy_bpf_prog.prog)
continue;
prog_ids[i] = item->prog->aux->id;
@@ -1835,7 +1830,7 @@ static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
return !!(item->prog);
}
-int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
+int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
__u32 __user *prog_ids, u32 cnt)
{
unsigned long err = 0;
@@ -1846,18 +1841,12 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
* cnt = bpf_prog_array_length();
* if (cnt > 0)
* bpf_prog_array_copy_to_user(..., cnt);
- * so below kcalloc doesn't need extra cnt > 0 check, but
- * bpf_prog_array_length() releases rcu lock and
- * prog array could have been swapped with empty or larger array,
- * so always copy 'cnt' prog_ids to the user.
- * In a rare race the user will see zero prog_ids
+ * so below kcalloc doesn't need extra cnt > 0 check.
*/
ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
if (!ids)
return -ENOMEM;
- rcu_read_lock();
nospc = bpf_prog_array_copy_core(array, ids, cnt);
- rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
if (err)
@@ -1867,19 +1856,19 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
return 0;
}
-void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
+void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
struct bpf_prog *old_prog)
{
- struct bpf_prog_array_item *item = array->items;
+ struct bpf_prog_array_item *item;
- for (; item->prog; item++)
+ for (item = array->items; item->prog; item++)
if (item->prog == old_prog) {
WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
break;
}
}
-int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array)
@@ -1943,7 +1932,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
return 0;
}
-int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt)
{
@@ -2086,6 +2075,15 @@ bool __weak bpf_helper_changes_pkt_data(void *func)
return false;
}
+/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
+ * analysis code and wants explicit zero extension inserted by verifier.
+ * Otherwise, return FALSE.
+ */
+bool __weak bpf_jit_needs_zext(void)
+{
+ return false;
+}
+
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
* skb_copy_bits(), so provide a weak definition of it for NET-less config.
*/
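
The bpf_jit_needs_zext() hook added above is a __weak stub returning false, so JIT backends that do not want the verifier's explicit zero-extension instructions are unaffected; an arch JIT opts in by supplying a strong definition that returns true from its own object file. The sketch below only demonstrates that weak/strong override mechanism in plain userspace C; the file split and names are illustrative, not the kernel's build wiring.

/* Minimal demo of a weak default overridden by a strong definition. */
#include <stdio.h>
#include <stdbool.h>

/* Generic default (kernel: core.c): no explicit zero extension requested. */
bool __attribute__((weak)) bpf_jit_needs_zext(void)
{
	return false;
}

/* An arch JIT that relies on verifier-inserted zext would add, in its own
 * translation unit, a strong definition that wins over the weak one:
 *
 *	bool bpf_jit_needs_zext(void)
 *	{
 *		return true;
 *	}
 */

int main(void)
{
	printf("JIT wants explicit zero extension: %s\n",
	       bpf_jit_needs_zext() ? "yes" : "no");
	return 0;
}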
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8ebd0fa826f8..8dff08768087 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -106,12 +106,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
/* make sure page count doesn't overflow */
cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_cmap;
- cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* Notice returns -EPERM on if map size is larger than memlock limit */
- ret = bpf_map_precharge_memlock(cmap->map.pages);
+ ret = bpf_map_charge_init(&cmap->map.memory, cost);
if (ret) {
err = ret;
goto free_cmap;
@@ -121,7 +118,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
cmap->flush_needed = __alloc_percpu(cpu_map_bitmap_size(attr),
__alignof__(unsigned long));
if (!cmap->flush_needed)
- goto free_cmap;
+ goto free_charge;
/* Alloc array for possible remote "destination" CPUs */
cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
@@ -133,6 +130,8 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
return &cmap->map;
free_percpu:
free_percpu(cmap->flush_needed);
+free_charge:
+ bpf_map_charge_finish(&cmap->map.memory);
free_cmap:
kfree(cmap);
return ERR_PTR(err);
@@ -209,6 +208,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* - RX ring dev queue index (skb_record_rx_queue)
*/
+ /* Until page_pool get SKB return path, release DMA here */
+ xdp_release_frame(xdpf);
+
/* Allow SKB to reuse area used by xdp_frame */
xdp_scrub_frame(xdpf);
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index cd8297b3bdb9..40e86a7e0ef0 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -80,8 +80,8 @@ static u64 dev_map_bitmap_size(const union bpf_attr *attr)
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
struct bpf_dtab *dtab;
- int err = -EINVAL;
u64 cost;
+ int err;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -100,13 +100,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
cost += dev_map_bitmap_size(attr) * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_dtab;
-
- dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- /* if map size is larger than memlock limit, reject it early */
- err = bpf_map_precharge_memlock(dtab->map.pages);
+ /* if map size is larger than memlock limit, reject it */
+ err = bpf_map_charge_init(&dtab->map.memory, cost);
if (err)
goto free_dtab;
@@ -117,19 +113,21 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
__alignof__(unsigned long),
GFP_KERNEL | __GFP_NOWARN);
if (!dtab->flush_needed)
- goto free_dtab;
+ goto free_charge;
dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
- goto free_dtab;
+ goto free_charge;
spin_lock(&dev_map_lock);
list_add_tail_rcu(&dtab->list, &dev_map_list);
spin_unlock(&dev_map_lock);
return &dtab->map;
+free_charge:
+ bpf_map_charge_finish(&dtab->map.memory);
free_dtab:
free_percpu(dtab->flush_needed);
kfree(dtab);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 583df5cb302d..22066a62c8c9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -352,14 +352,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
cost += (u64) htab->elem_size * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- /* make sure page count doesn't overflow */
- goto free_htab;
-
- htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
- /* if map size is larger than memlock limit, reject it early */
- err = bpf_map_precharge_memlock(htab->map.pages);
+ /* if map size is larger than memlock limit, reject it */
+ err = bpf_map_charge_init(&htab->map.memory, cost);
if (err)
goto free_htab;
@@ -368,7 +362,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
sizeof(struct bucket),
htab->map.numa_node);
if (!htab->buckets)
- goto free_htab;
+ goto free_charge;
if (htab->map.map_flags & BPF_F_ZERO_SEED)
htab->hashrnd = 0;
@@ -401,6 +395,8 @@ free_prealloc:
prealloc_destroy(htab);
free_buckets:
bpf_map_area_free(htab->buckets);
+free_charge:
+ bpf_map_charge_finish(&htab->map.memory);
free_htab:
kfree(htab);
return ERR_PTR(err);
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 980e8f1f6cb5..addd6fdceec8 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -272,6 +272,8 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_cgroup_storage_map *map;
+ struct bpf_map_memory mem;
+ int ret;
if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
return ERR_PTR(-EINVAL);
@@ -290,13 +292,18 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
/* max_entries is not used and enforced to be 0 */
return ERR_PTR(-EINVAL);
+ ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
+ if (ret < 0)
+ return ERR_PTR(ret);
+
map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
__GFP_ZERO | GFP_USER, numa_node);
- if (!map)
+ if (!map) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
+ }
- map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
- PAGE_SIZE) >> PAGE_SHIFT;
+ bpf_map_charge_move(&map->map.memory, &mem);
/* copy mandatory map attributes */
bpf_map_init_from_attr(&map->map, attr);
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 57b59cca4db7..56e6c75d354d 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -570,14 +570,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
cost_per_node = sizeof(struct lpm_trie_node) +
attr->value_size + trie->data_size;
cost += (u64) attr->max_entries * cost_per_node;
- if (cost >= U32_MAX - PAGE_SIZE) {
- ret = -E2BIG;
- goto out_err;
- }
-
- trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- ret = bpf_map_precharge_memlock(trie->map.pages);
+ ret = bpf_map_charge_init(&trie->map.memory, cost);
if (ret)
goto out_err;
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 0b140d236889..f697647ceb54 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -67,29 +67,28 @@ static int queue_stack_map_alloc_check(union bpf_attr *attr)
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
int ret, numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_map_memory mem = {0};
struct bpf_queue_stack *qs;
u64 size, queue_size, cost;
size = (u64) attr->max_entries + 1;
cost = queue_size = sizeof(*qs) + size * attr->value_size;
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-E2BIG);
- cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
- ret = bpf_map_precharge_memlock(cost);
+ ret = bpf_map_charge_init(&mem, cost);
if (ret < 0)
return ERR_PTR(ret);
qs = bpf_map_area_alloc(queue_size, numa_node);
- if (!qs)
+ if (!qs) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
+ }
memset(qs, 0, sizeof(*qs));
bpf_map_init_from_attr(&qs->map, attr);
- qs->map.pages = cost;
+ bpf_map_charge_move(&qs->map.memory, &mem);
qs->size = size;
raw_spin_lock_init(&qs->lock);
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 18e225de80ff..50c083ba978c 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -151,7 +151,8 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
int err, numa_node = bpf_map_attr_numa_node(attr);
struct reuseport_array *array;
- u64 cost, array_size;
+ struct bpf_map_memory mem;
+ u64 array_size;
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
@@ -159,24 +160,20 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
array_size = sizeof(*array);
array_size += (u64)attr->max_entries * sizeof(struct sock *);
- /* make sure there is no u32 overflow later in round_up() */
- cost = array_size;
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-ENOMEM);
- cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
- err = bpf_map_precharge_memlock(cost);
+ err = bpf_map_charge_init(&mem, array_size);
if (err)
return ERR_PTR(err);
/* allocate all map elements and zero-initialize them */
array = bpf_map_area_alloc(array_size, numa_node);
- if (!array)
+ if (!array) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
+ }
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
- array->map.pages = cost;
+ bpf_map_charge_move(&array->map.memory, &mem);
return &array->map;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d38e49f943a1..052580c33d26 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -86,6 +86,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
u32 value_size = attr->value_size;
struct bpf_stack_map *smap;
+ struct bpf_map_memory mem;
u64 cost, n_buckets;
int err;
@@ -113,40 +114,37 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
n_buckets = roundup_pow_of_two(attr->max_entries);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
- if (cost >= U32_MAX - PAGE_SIZE)
- return ERR_PTR(-E2BIG);
+ cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+ err = bpf_map_charge_init(&mem, cost);
+ if (err)
+ return ERR_PTR(err);
smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
- if (!smap)
+ if (!smap) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
-
- err = -E2BIG;
- cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_smap;
+ }
bpf_map_init_from_attr(&smap->map, attr);
smap->map.value_size = value_size;
smap->n_buckets = n_buckets;
- smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
- err = bpf_map_precharge_memlock(smap->map.pages);
- if (err)
- goto free_smap;
err = get_callchain_buffers(sysctl_perf_event_max_stack);
if (err)
- goto free_smap;
+ goto free_charge;
err = prealloc_elems_and_freelist(smap);
if (err)
goto put_buffers;
+ bpf_map_charge_move(&smap->map.memory, &mem);
+
return &smap->map;
put_buffers:
put_callchain_buffers();
-free_smap:
+free_charge:
+ bpf_map_charge_finish(&mem);
bpf_map_area_free(smap);
return ERR_PTR(err);
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 42d17f730780..7713cf39795a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -180,19 +180,6 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
map->numa_node = bpf_map_attr_numa_node(attr);
}
-int bpf_map_precharge_memlock(u32 pages)
-{
- struct user_struct *user = get_current_user();
- unsigned long memlock_limit, cur;
-
- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- cur = atomic_long_read(&user->locked_vm);
- free_uid(user);
- if (cur + pages > memlock_limit)
- return -EPERM;
- return 0;
-}
-
static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -206,45 +193,62 @@ static int bpf_charge_memlock(struct user_struct *user, u32 pages)
static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
- atomic_long_sub(pages, &user->locked_vm);
+ if (user)
+ atomic_long_sub(pages, &user->locked_vm);
}
-static int bpf_map_init_memlock(struct bpf_map *map)
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
{
- struct user_struct *user = get_current_user();
+ u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
+ struct user_struct *user;
int ret;
- ret = bpf_charge_memlock(user, map->pages);
+ if (size >= U32_MAX - PAGE_SIZE)
+ return -E2BIG;
+
+ user = get_current_user();
+ ret = bpf_charge_memlock(user, pages);
if (ret) {
free_uid(user);
return ret;
}
- map->user = user;
- return ret;
+
+ mem->pages = pages;
+ mem->user = user;
+
+ return 0;
}
-static void bpf_map_release_memlock(struct bpf_map *map)
+void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
- struct user_struct *user = map->user;
- bpf_uncharge_memlock(user, map->pages);
- free_uid(user);
+ bpf_uncharge_memlock(mem->user, mem->pages);
+ free_uid(mem->user);
+}
+
+void bpf_map_charge_move(struct bpf_map_memory *dst,
+ struct bpf_map_memory *src)
+{
+ *dst = *src;
+
+ /* Make sure src will not be used for the redundant uncharging. */
+ memset(src, 0, sizeof(struct bpf_map_memory));
}
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
int ret;
- ret = bpf_charge_memlock(map->user, pages);
+ ret = bpf_charge_memlock(map->memory.user, pages);
if (ret)
return ret;
- map->pages += pages;
+ map->memory.pages += pages;
return ret;
}
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
- bpf_uncharge_memlock(map->user, pages);
- map->pages -= pages;
+ bpf_uncharge_memlock(map->memory.user, pages);
+ map->memory.pages -= pages;
}
static int bpf_map_alloc_id(struct bpf_map *map)
@@ -295,11 +299,13 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
+ struct bpf_map_memory mem;
- bpf_map_release_memlock(map);
+ bpf_map_charge_move(&mem, &map->memory);
security_bpf_map_free(map);
/* implementation dependent freeing */
map->ops->map_free(map);
+ bpf_map_charge_finish(&mem);
}
static void bpf_map_put_uref(struct bpf_map *map)
@@ -387,7 +393,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
map->value_size,
map->max_entries,
map->map_flags,
- map->pages * 1ULL << PAGE_SHIFT,
+ map->memory.pages * 1ULL << PAGE_SHIFT,
map->id,
READ_ONCE(map->frozen));
@@ -541,6 +547,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
static int map_create(union bpf_attr *attr)
{
int numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_map_memory mem;
struct bpf_map *map;
int f_flags;
int err;
@@ -565,7 +572,7 @@ static int map_create(union bpf_attr *attr)
err = bpf_obj_name_cpy(map->name, attr->map_name);
if (err)
- goto free_map_nouncharge;
+ goto free_map;
atomic_set(&map->refcnt, 1);
atomic_set(&map->usercnt, 1);
@@ -575,20 +582,20 @@ static int map_create(union bpf_attr *attr)
if (!attr->btf_value_type_id) {
err = -EINVAL;
- goto free_map_nouncharge;
+ goto free_map;
}
btf = btf_get_by_fd(attr->btf_fd);
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
- goto free_map_nouncharge;
+ goto free_map;
}
err = map_check_btf(map, btf, attr->btf_key_type_id,
attr->btf_value_type_id);
if (err) {
btf_put(btf);
- goto free_map_nouncharge;
+ goto free_map;
}
map->btf = btf;
@@ -600,15 +607,11 @@ static int map_create(union bpf_attr *attr)
err = security_bpf_map_alloc(map);
if (err)
- goto free_map_nouncharge;
-
- err = bpf_map_init_memlock(map);
- if (err)
- goto free_map_sec;
+ goto free_map;
err = bpf_map_alloc_id(map);
if (err)
- goto free_map;
+ goto free_map_sec;
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
@@ -624,13 +627,13 @@ static int map_create(union bpf_attr *attr)
return err;
-free_map:
- bpf_map_release_memlock(map);
free_map_sec:
security_bpf_map_free(map);
-free_map_nouncharge:
+free_map:
btf_put(map->btf);
+ bpf_map_charge_move(&mem, &map->memory);
map->ops->map_free(map);
+ bpf_map_charge_finish(&mem);
return err;
}
@@ -1579,6 +1582,14 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
default:
return -EINVAL;
}
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ switch (expected_attach_type) {
+ case BPF_CGROUP_INET_INGRESS:
+ case BPF_CGROUP_INET_EGRESS:
+ return 0;
+ default:
+ return -EINVAL;
+ }
default:
return 0;
}
@@ -1598,7 +1609,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (CHECK_ATTR(BPF_PROG_LOAD))
return -EINVAL;
- if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
+ if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
+ BPF_F_ANY_ALIGNMENT |
+ BPF_F_TEST_RND_HI32))
return -EINVAL;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
@@ -1828,6 +1841,10 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ return prog->enforce_expected_attach_type &&
+ prog->expected_attach_type != attach_type ?
+ -EINVAL : 0;
default:
return 0;
}
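
bpf_map_charge_init() also centralizes the size sanity check and page rounding that every map allocator used to open-code: the byte count is rejected with -E2BIG when it approaches U32_MAX and is otherwise rounded up to whole pages before being charged against RLIMIT_MEMLOCK. A minimal userspace sketch of just that arithmetic (PAGE_SIZE assumed to be 4096 for illustration; rlimit handling and user refcounting left out):

/* Toy model of the size -> pages conversion done by bpf_map_charge_init(). */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define TOY_PAGE_SIZE	4096ULL
#define TOY_PAGE_SHIFT	12

static int charge_pages(uint64_t size, uint32_t *pages)
{
	if (size >= UINT32_MAX - TOY_PAGE_SIZE)	/* the old per-map overflow check */
		return -E2BIG;

	/* round_up(size, PAGE_SIZE) >> PAGE_SHIFT */
	*pages = (uint32_t)((size + TOY_PAGE_SIZE - 1) >> TOY_PAGE_SHIFT);
	return 0;
}

int main(void)
{
	uint64_t sizes[] = { 1, TOY_PAGE_SIZE, TOY_PAGE_SIZE + 1, 10 * 1000 * 1000 };
	uint32_t pages;
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (!charge_pages(sizes[i], &pages))
			printf("%llu bytes -> %u pages\n",
			       (unsigned long long)sizes[i], pages);
	return 0;
}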
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a5c369e60343..0e079b2298f8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -168,7 +168,7 @@ struct bpf_verifier_stack_elem {
struct bpf_verifier_stack_elem *next;
};
-#define BPF_COMPLEXITY_LIMIT_STACK 1024
+#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_PTR_UNPRIV 1UL
@@ -326,7 +326,8 @@ static bool type_is_sk_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCK_COMMON ||
- type == PTR_TO_TCP_SOCK;
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_XDP_SOCK;
}
static bool reg_type_may_be_null(enum bpf_reg_type type)
@@ -398,6 +399,7 @@ static const char * const reg_type_str[] = {
[PTR_TO_TCP_SOCK] = "tcp_sock",
[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
[PTR_TO_TP_BUFFER] = "tp_buffer",
+ [PTR_TO_XDP_SOCK] = "xdp_sock",
};
static char slot_type_char[] = {
@@ -445,12 +447,12 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str[t]);
+ if (t == SCALAR_VALUE && reg->precise)
+ verbose(env, "P");
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
- if (t == PTR_TO_STACK)
- verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
@@ -512,11 +514,17 @@ static void print_verifier_state(struct bpf_verifier_env *env,
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
- if (state->stack[i].slot_type[0] == STACK_SPILL)
- verbose(env, "=%s",
- reg_type_str[state->stack[i].spilled_ptr.type]);
- else
+ if (state->stack[i].slot_type[0] == STACK_SPILL) {
+ reg = &state->stack[i].spilled_ptr;
+ t = reg->type;
+ verbose(env, "=%s", reg_type_str[t]);
+ if (t == SCALAR_VALUE && reg->precise)
+ verbose(env, "P");
+ if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
+ verbose(env, "%lld", reg->var_off.value + reg->off);
+ } else {
verbose(env, "=%s", types_buf);
+ }
}
if (state->acquired_refs && state->refs[0].id) {
verbose(env, " refs=%d", state->refs[0].id);
@@ -665,6 +673,13 @@ static void free_func_state(struct bpf_func_state *state)
kfree(state);
}
+static void clear_jmp_history(struct bpf_verifier_state *state)
+{
+ kfree(state->jmp_history);
+ state->jmp_history = NULL;
+ state->jmp_history_cnt = 0;
+}
+
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
@@ -674,6 +689,7 @@ static void free_verifier_state(struct bpf_verifier_state *state,
free_func_state(state->frame[i]);
state->frame[i] = NULL;
}
+ clear_jmp_history(state);
if (free_self)
kfree(state);
}
@@ -701,8 +717,18 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
+ u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
int i, err;
+ if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
+ kfree(dst_state->jmp_history);
+ dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
+ if (!dst_state->jmp_history)
+ return -ENOMEM;
+ }
+ memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
+ dst_state->jmp_history_cnt = src->jmp_history_cnt;
+
/* if dst has more stack frames then src frame, free them */
for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
free_func_state(dst_state->frame[i]);
@@ -711,6 +737,10 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
dst_state->speculative = src->speculative;
dst_state->curframe = src->curframe;
dst_state->active_spin_lock = src->active_spin_lock;
+ dst_state->branches = src->branches;
+ dst_state->parent = src->parent;
+ dst_state->first_insn_idx = src->first_insn_idx;
+ dst_state->last_insn_idx = src->last_insn_idx;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
@@ -726,6 +756,23 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
return 0;
}
+static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
+{
+ while (st) {
+ u32 br = --st->branches;
+
+ /* WARN_ON(br > 1) technically makes sense here,
+ * but see comment in push_stack(), hence:
+ */
+ WARN_ONCE((int)br < 0,
+ "BUG update_branch_counts:branches_to_explore=%d\n",
+ br);
+ if (br)
+ break;
+ st = st->parent;
+ }
+}
+
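
Each verifier state now keeps a count of still-unexplored paths that branch off it; when the last such path finishes, the decrement propagates up the parent chain exactly as update_branch_counts() does, and only states whose count has reached zero are safe to treat as fully explored during pruning. A toy model of that bookkeeping, with simplified stand-in types:

/* Toy model of the branches-to-explore accounting. */
#include <stdio.h>

struct toy_state {
	struct toy_state *parent;
	unsigned int branches;
};

static void branch_done(struct toy_state *st)
{
	while (st) {
		if (--st->branches)	/* siblings still pending: stop here */
			break;
		st = st->parent;	/* last path from st done: tell the parent */
	}
}

int main(void)
{
	struct toy_state root  = { .parent = NULL,  .branches = 2 };	/* a conditional jump */
	struct toy_state left  = { .parent = &root, .branches = 1 };	/* fall-through path */
	struct toy_state right = { .parent = &root, .branches = 1 };	/* taken path */

	branch_done(&left);				/* left path reaches BPF_EXIT */
	printf("root.branches = %u\n", root.branches);	/* 1: right path still open */
	branch_done(&right);
	printf("root.branches = %u\n", root.branches);	/* 0: root fully explored */
	return 0;
}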
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
@@ -774,10 +821,23 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
if (err)
goto err;
elem->st.speculative |= speculative;
- if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
- verbose(env, "BPF program is too complex\n");
+ if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
+ verbose(env, "The sequence of %d jumps is too complex.\n",
+ env->stack_size);
goto err;
}
+ if (elem->st.parent) {
+ ++elem->st.parent->branches;
+ /* WARN_ON(branches > 2) technically makes sense here,
+ * but
+ * 1. speculative states will bump 'branches' for non-branch
+ * instructions
+ * 2. is_state_visited() heuristics may decide not to create
+ * a new state for a sequence of branches and all such current
+ * and cloned states will be pointing to a single parent state
+ * which might have large 'branches' count.
+ */
+ }
return &elem->st;
err:
free_verifier_state(env->cur_state, true);
@@ -925,6 +985,9 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
reg->smax_value = S64_MAX;
reg->umin_value = 0;
reg->umax_value = U64_MAX;
+
+ /* constant backtracking is enabled for root only for now */
+ reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
}
/* Mark a register as having a completely unknown (scalar) value. */
@@ -973,6 +1036,7 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
__mark_reg_not_init(regs + regno);
}
+#define DEF_NOT_SUBREG (0)
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{
@@ -983,6 +1047,7 @@ static void init_reg_state(struct bpf_verifier_env *env,
mark_reg_not_init(env, regs, i);
regs[i].live = REG_LIVE_NONE;
regs[i].parent = NULL;
+ regs[i].subreg_def = DEF_NOT_SUBREG;
}
/* frame pointer */
@@ -1128,7 +1193,7 @@ next:
*/
static int mark_reg_read(struct bpf_verifier_env *env,
const struct bpf_reg_state *state,
- struct bpf_reg_state *parent)
+ struct bpf_reg_state *parent, u8 flag)
{
bool writes = parent == state->parent; /* Observe write marks */
int cnt = 0;
@@ -1143,17 +1208,26 @@ static int mark_reg_read(struct bpf_verifier_env *env,
parent->var_off.value, parent->off);
return -EFAULT;
}
- if (parent->live & REG_LIVE_READ)
+ /* The first condition is more likely to be true than the
+ * second, so check it first.
+ */
+ if ((parent->live & REG_LIVE_READ) == flag ||
+ parent->live & REG_LIVE_READ64)
/* The parentage chain never changes and
* this parent was already marked as LIVE_READ.
* There is no need to keep walking the chain again and
* keep re-marking all parents as LIVE_READ.
* This case happens when the same register is read
* multiple times without writes into it in-between.
+ * Also, if parent has the stronger REG_LIVE_READ64 set,
+ * then no need to set the weak REG_LIVE_READ32.
*/
break;
/* ... then we depend on parent's value */
- parent->live |= REG_LIVE_READ;
+ parent->live |= flag;
+ /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
+ if (flag == REG_LIVE_READ64)
+ parent->live &= ~REG_LIVE_READ32;
state = parent;
parent = state->parent;
writes = true;
@@ -1165,12 +1239,129 @@ static int mark_reg_read(struct bpf_verifier_env *env,
return 0;
}
+/* This function is supposed to be used by the following 32-bit optimization
+ * code only. It returns TRUE if the source or destination register operates
+ * on 64-bit, otherwise return FALSE.
+ */
+static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
+{
+ u8 code, class, op;
+
+ code = insn->code;
+ class = BPF_CLASS(code);
+ op = BPF_OP(code);
+ if (class == BPF_JMP) {
+ /* BPF_EXIT for "main" will reach here. Return TRUE
+ * conservatively.
+ */
+ if (op == BPF_EXIT)
+ return true;
+ if (op == BPF_CALL) {
+ /* BPF to BPF call will reach here because of marking
+ * caller saved clobber with DST_OP_NO_MARK for which we
+ * don't care the register def because they are anyway
+ * marked as NOT_INIT already.
+ */
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ return false;
+ /* Helper call will reach here because of arg type
+ * check, conservatively return TRUE.
+ */
+ if (t == SRC_OP)
+ return true;
+
+ return false;
+ }
+ }
+
+ if (class == BPF_ALU64 || class == BPF_JMP ||
+ /* BPF_END always use BPF_ALU class. */
+ (class == BPF_ALU && op == BPF_END && insn->imm == 64))
+ return true;
+
+ if (class == BPF_ALU || class == BPF_JMP32)
+ return false;
+
+ if (class == BPF_LDX) {
+ if (t != SRC_OP)
+ return BPF_SIZE(code) == BPF_DW;
+ /* LDX source must be ptr. */
+ return true;
+ }
+
+ if (class == BPF_STX) {
+ if (reg->type != SCALAR_VALUE)
+ return true;
+ return BPF_SIZE(code) == BPF_DW;
+ }
+
+ if (class == BPF_LD) {
+ u8 mode = BPF_MODE(code);
+
+ /* LD_IMM64 */
+ if (mode == BPF_IMM)
+ return true;
+
+ /* Both LD_IND and LD_ABS return 32-bit data. */
+ if (t != SRC_OP)
+ return false;
+
+ /* Implicit ctx ptr. */
+ if (regno == BPF_REG_6)
+ return true;
+
+ /* Explicit source could be any width. */
+ return true;
+ }
+
+ if (class == BPF_ST)
+ /* The only source register for BPF_ST is a ptr. */
+ return true;
+
+ /* Conservatively return true at default. */
+ return true;
+}
+
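
is_reg64() keys the sub-register analysis off the instruction encoding: BPF_ALU64 and BPF_JMP instructions operate on full 64-bit registers, while BPF_ALU and BPF_JMP32 touch only the low 32 bits and so define candidates for later zero extension. The userspace sketch below models only those basic ALU/JMP cases (byte swaps, loads and stores handled by the full function are ignored) and assumes the Linux uapi header <linux/bpf.h> is available:

/* Toy classifier for 32- vs 64-bit ALU/JMP instruction encodings. */
#include <stdio.h>
#include <stdbool.h>
#include <linux/bpf.h>

static bool alu_jmp_is_reg64(__u8 code)
{
	__u8 class = BPF_CLASS(code);

	if (class == BPF_ALU64 || class == BPF_JMP)
		return true;		/* full 64-bit operation */
	if (class == BPF_ALU || class == BPF_JMP32)
		return false;		/* reads/writes a sub-register only */
	return true;			/* conservative default for anything else */
}

int main(void)
{
	struct bpf_insn insn32 = { .code = BPF_ALU   | BPF_ADD | BPF_K,
				   .dst_reg = BPF_REG_1, .imm = 1 };
	struct bpf_insn insn64 = { .code = BPF_ALU64 | BPF_ADD | BPF_K,
				   .dst_reg = BPF_REG_1, .imm = 1 };

	printf("BPF_ALU   add: 64-bit? %d\n", alu_jmp_is_reg64(insn32.code));
	printf("BPF_ALU64 add: 64-bit? %d\n", alu_jmp_is_reg64(insn64.code));
	return 0;
}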
+/* Return TRUE if INSN doesn't have explicit value define. */
+static bool insn_no_def(struct bpf_insn *insn)
+{
+ u8 class = BPF_CLASS(insn->code);
+
+ return (class == BPF_JMP || class == BPF_JMP32 ||
+ class == BPF_STX || class == BPF_ST);
+}
+
+/* Return TRUE if INSN has defined any 32-bit value explicitly. */
+static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
+{
+ if (insn_no_def(insn))
+ return false;
+
+ return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
+}
+
+static void mark_insn_zext(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg)
+{
+ s32 def_idx = reg->subreg_def;
+
+ if (def_idx == DEF_NOT_SUBREG)
+ return;
+
+ env->insn_aux_data[def_idx - 1].zext_dst = true;
+ /* The dst will be zero extended, so won't be sub-register anymore. */
+ reg->subreg_def = DEF_NOT_SUBREG;
+}
+
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
struct bpf_reg_state *reg, *regs = state->regs;
+ bool rw64;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
@@ -1178,6 +1369,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
}
reg = &regs[regno];
+ rw64 = is_reg64(env, insn, regno, reg, t);
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (reg->type == NOT_INIT) {
@@ -1188,7 +1380,11 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
if (regno == BPF_REG_FP)
return 0;
- return mark_reg_read(env, reg, reg->parent);
+ if (rw64)
+ mark_insn_zext(env, reg);
+
+ return mark_reg_read(env, reg, reg->parent,
+ rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
@@ -1196,12 +1392,396 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
return -EACCES;
}
reg->live |= REG_LIVE_WRITTEN;
+ reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
if (t == DST_OP)
mark_reg_unknown(env, regs, regno);
}
return 0;
}
+/* for any branch, call, exit record the history of jmps in the given state */
+static int push_jmp_history(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *cur)
+{
+ u32 cnt = cur->jmp_history_cnt;
+ struct bpf_idx_pair *p;
+
+ cnt++;
+ p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
+ if (!p)
+ return -ENOMEM;
+ p[cnt - 1].idx = env->insn_idx;
+ p[cnt - 1].prev_idx = env->prev_insn_idx;
+ cur->jmp_history = p;
+ cur->jmp_history_cnt = cnt;
+ return 0;
+}
+
+/* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
+ */
+static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ u32 *history)
+{
+ u32 cnt = *history;
+
+ if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ i = st->jmp_history[cnt - 1].prev_idx;
+ (*history)--;
+ } else {
+ i--;
+ }
+ return i;
+}
+
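
The jump history is what lets precision backtracking walk a path in reverse without re-running verification: every taken jump is recorded as an (idx, prev_idx) pair, so stepping backwards either pops the most recent recorded jump or simply moves to idx - 1 for straight-line code. A small runnable model of that walk, with made-up instruction indices:

/* Toy model of the reverse walk driven by the recorded jump history. */
#include <stdio.h>

struct idx_pair { int idx, prev_idx; };

static int prev_insn_idx(const struct idx_pair *hist, unsigned int *cnt, int i)
{
	if (*cnt && hist[*cnt - 1].idx == i) {
		i = hist[*cnt - 1].prev_idx;	/* we arrived at i via a jump */
		(*cnt)--;
	} else {
		i--;				/* straight-line execution */
	}
	return i;
}

int main(void)
{
	/* Suppose insn 3 jumped to insn 7, and insn 9 jumped to insn 12. */
	struct idx_pair hist[] = { { 7, 3 }, { 12, 9 } };
	unsigned int cnt = 2;
	int i = 14;				/* start backtracking at insn 14 */

	while (i > 0) {
		printf("%d ", i);
		i = prev_insn_idx(hist, &cnt, i);
	}
	printf("%d\n", i);			/* prints: 14 13 12 9 8 7 3 2 1 0 */
	return 0;
}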
+/* For given verifier state backtrack_insn() is called from the last insn to
+ * the first insn. Its purpose is to compute a bitmask of registers and
+ * stack slots that needs precision in the parent verifier state.
+ */
+static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ u32 *reg_mask, u64 *stack_mask)
+{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = verbose,
+ .private_data = env,
+ };
+ struct bpf_insn *insn = env->prog->insnsi + idx;
+ u8 class = BPF_CLASS(insn->code);
+ u8 opcode = BPF_OP(insn->code);
+ u8 mode = BPF_MODE(insn->code);
+ u32 dreg = 1u << insn->dst_reg;
+ u32 sreg = 1u << insn->src_reg;
+ u32 spi;
+
+ if (insn->code == 0)
+ return 0;
+ if (env->log.level & BPF_LOG_LEVEL) {
+ verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
+ verbose(env, "%d: ", idx);
+ print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
+ }
+
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (!(*reg_mask & dreg))
+ return 0;
+ if (opcode == BPF_MOV) {
+ if (BPF_SRC(insn->code) == BPF_X) {
+ /* dreg = sreg
+ * dreg needs precision after this insn
+ * sreg needs precision before this insn
+ */
+ *reg_mask &= ~dreg;
+ *reg_mask |= sreg;
+ } else {
+ /* dreg = K
+ * dreg needs precision after this insn.
+ * Corresponding register is already marked
+ * as precise=true in this verifier state.
+ * No further markings in parent are necessary
+ */
+ *reg_mask &= ~dreg;
+ }
+ } else {
+ if (BPF_SRC(insn->code) == BPF_X) {
+ /* dreg += sreg
+ * both dreg and sreg need precision
+ * before this insn
+ */
+ *reg_mask |= sreg;
+ } /* else dreg += K
+ * dreg still needs precision before this insn
+ */
+ }
+ } else if (class == BPF_LDX) {
+ if (!(*reg_mask & dreg))
+ return 0;
+ *reg_mask &= ~dreg;
+
+ /* scalars can only be spilled into stack w/o losing precision.
+ * Load from any other memory can be zero extended.
+ * The desire to keep that precision is already indicated
+ * by 'precise' mark in corresponding register of this state.
+ * No further tracking necessary.
+ */
+ if (insn->src_reg != BPF_REG_FP)
+ return 0;
+ if (BPF_SIZE(insn->code) != BPF_DW)
+ return 0;
+
+ /* dreg = *(u64 *)[fp - off] was a fill from the stack.
+ * that [fp - off] slot contains scalar that needs to be
+ * tracked with precision
+ */
+ spi = (-insn->off - 1) / BPF_REG_SIZE;
+ if (spi >= 64) {
+ verbose(env, "BUG spi %d\n", spi);
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ *stack_mask |= 1ull << spi;
+ } else if (class == BPF_STX) {
+ if (*reg_mask & dreg)
+ /* stx shouldn't be using _scalar_ dst_reg
+ * to access memory. It means backtracking
+ * encountered a case of pointer subtraction.
+ */
+ return -ENOTSUPP;
+ /* scalars can only be spilled into stack */
+ if (insn->dst_reg != BPF_REG_FP)
+ return 0;
+ if (BPF_SIZE(insn->code) != BPF_DW)
+ return 0;
+ spi = (-insn->off - 1) / BPF_REG_SIZE;
+ if (spi >= 64) {
+ verbose(env, "BUG spi %d\n", spi);
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ if (!(*stack_mask & (1ull << spi)))
+ return 0;
+ *stack_mask &= ~(1ull << spi);
+ *reg_mask |= sreg;
+ } else if (class == BPF_JMP || class == BPF_JMP32) {
+ if (opcode == BPF_CALL) {
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ return -ENOTSUPP;
+ /* regular helper call sets R0 */
+ *reg_mask &= ~1;
+ if (*reg_mask & 0x3f) {
+ /* if backtracing was looking for registers R1-R5
+ * they should have been found already.
+ */
+ verbose(env, "BUG regs %x\n", *reg_mask);
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ } else if (opcode == BPF_EXIT) {
+ return -ENOTSUPP;
+ }
+ } else if (class == BPF_LD) {
+ if (!(*reg_mask & dreg))
+ return 0;
+ *reg_mask &= ~dreg;
+ /* It's ld_imm64 or ld_abs or ld_ind.
+ * For ld_imm64 no further tracking of precision
+ * into parent is necessary
+ */
+ if (mode == BPF_IND || mode == BPF_ABS)
+ /* to be analyzed */
+ return -ENOTSUPP;
+ } else if (class == BPF_ST) {
+ if (*reg_mask & dreg)
+ /* likely pointer subtraction */
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+/* the scalar precision tracking algorithm:
+ * . at the start all registers have precise=false.
+ * . scalar ranges are tracked as normal through alu and jmp insns.
+ * . once precise value of the scalar register is used in:
+ * . ptr + scalar alu
+ * . if (scalar cond K|scalar)
+ * . helper_call(.., scalar, ...) where ARG_CONST is expected
+ * backtrack through the verifier states and mark all registers and
+ * stack slots with spilled constants that these scalar registers
+ * should be precise.
+ * . during state pruning two registers (or spilled stack slots)
+ * are equivalent if both are not precise.
+ *
+ * Note the verifier cannot simply walk register parentage chain,
+ * since many different registers and stack slots could have been
+ * used to compute single precise scalar.
+ *
+ * The approach of starting with precise=true for all registers and then
+ * backtrack to mark a register as not precise when the verifier detects
+ * that program doesn't care about specific value (e.g., when helper
+ * takes register as ARG_ANYTHING parameter) is not safe.
+ *
+ * It's ok to walk single parentage chain of the verifier states.
+ * It's possible that this backtracking will go all the way till 1st insn.
+ * All other branches will be explored for needing precision later.
+ *
+ * The backtracking needs to deal with cases like:
+ * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
+ * r9 -= r8
+ * r5 = r9
+ * if r5 > 0x79f goto pc+7
+ * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
+ * r5 += 1
+ * ...
+ * call bpf_perf_event_output#25
+ * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
+ *
+ * and this case:
+ * r6 = 1
+ * call foo // uses callee's r6 inside to compute r0
+ * r0 += r6
+ * if r0 == 0 goto
+ *
+ * to track above reg_mask/stack_mask needs to be independent for each frame.
+ *
+ * Also if parent's curframe > frame where backtracking started,
+ * the verifier need to mark registers in both frames, otherwise callees
+ * may incorrectly prune callers. This is similar to
+ * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
+ *
+ * For now backtracking falls back into conservative marking.
+ */
+static void mark_all_scalars_precise(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *st)
+{
+ struct bpf_func_state *func;
+ struct bpf_reg_state *reg;
+ int i, j;
+
+ /* big hammer: mark all scalars precise in this path.
+ * pop_stack may still get !precise scalars.
+ */
+ for (; st; st = st->parent)
+ for (i = 0; i <= st->curframe; i++) {
+ func = st->frame[i];
+ for (j = 0; j < BPF_REG_FP; j++) {
+ reg = &func->regs[j];
+ if (reg->type != SCALAR_VALUE)
+ continue;
+ reg->precise = true;
+ }
+ for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
+ if (func->stack[j].slot_type[0] != STACK_SPILL)
+ continue;
+ reg = &func->stack[j].spilled_ptr;
+ if (reg->type != SCALAR_VALUE)
+ continue;
+ reg->precise = true;
+ }
+ }
+}
+
+static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
+{
+ struct bpf_verifier_state *st = env->cur_state;
+ int first_idx = st->first_insn_idx;
+ int last_idx = env->insn_idx;
+ struct bpf_func_state *func;
+ struct bpf_reg_state *reg;
+ u32 reg_mask = 1u << regno;
+ u64 stack_mask = 0;
+ bool skip_first = true;
+ int i, err;
+
+ if (!env->allow_ptr_leaks)
+ /* backtracking is root only for now */
+ return 0;
+
+ func = st->frame[st->curframe];
+ reg = &func->regs[regno];
+ if (reg->type != SCALAR_VALUE) {
+ WARN_ONCE(1, "backtracing misuse");
+ return -EFAULT;
+ }
+ if (reg->precise)
+ return 0;
+ func->regs[regno].precise = true;
+
+ for (;;) {
+ DECLARE_BITMAP(mask, 64);
+ bool new_marks = false;
+ u32 history = st->jmp_history_cnt;
+
+ if (env->log.level & BPF_LOG_LEVEL)
+ verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
+ for (i = last_idx;;) {
+ if (skip_first) {
+ err = 0;
+ skip_first = false;
+ } else {
+ err = backtrack_insn(env, i, &reg_mask, &stack_mask);
+ }
+ if (err == -ENOTSUPP) {
+ mark_all_scalars_precise(env, st);
+ return 0;
+ } else if (err) {
+ return err;
+ }
+ if (!reg_mask && !stack_mask)
+ /* Found assignment(s) into tracked register in this state.
+ * Since this state is already marked, just return.
+ * Nothing to be tracked further in the parent state.
+ */
+ return 0;
+ if (i == first_idx)
+ break;
+ i = get_prev_insn_idx(st, i, &history);
+ if (i >= env->prog->len) {
+ /* This can happen if backtracking reached insn 0
+ * and there are still reg_mask or stack_mask
+ * to backtrack.
+ * It means the backtracking missed the spot where
+ * particular register was initialized with a constant.
+ */
+ verbose(env, "BUG backtracking idx %d\n", i);
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ }
+ st = st->parent;
+ if (!st)
+ break;
+
+ func = st->frame[st->curframe];
+ bitmap_from_u64(mask, reg_mask);
+ for_each_set_bit(i, mask, 32) {
+ reg = &func->regs[i];
+ if (reg->type != SCALAR_VALUE)
+ continue;
+ if (!reg->precise)
+ new_marks = true;
+ reg->precise = true;
+ }
+
+ bitmap_from_u64(mask, stack_mask);
+ for_each_set_bit(i, mask, 64) {
+ if (i >= func->allocated_stack / BPF_REG_SIZE) {
+ /* This can happen if backtracking
+ * is propagating stack precision where
+ * caller has larger stack frame
+ * than callee, but backtrack_insn() should
+ * have returned -ENOTSUPP.
+ */
+ verbose(env, "BUG spi %d stack_size %d\n",
+ i, func->allocated_stack);
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+
+ if (func->stack[i].slot_type[0] != STACK_SPILL)
+ continue;
+ reg = &func->stack[i].spilled_ptr;
+ if (reg->type != SCALAR_VALUE)
+ continue;
+ if (!reg->precise)
+ new_marks = true;
+ reg->precise = true;
+ }
+ if (env->log.level & BPF_LOG_LEVEL) {
+ print_verifier_state(env, func);
+ verbose(env, "parent %s regs=%x stack=%llx marks\n",
+ new_marks ? "didn't have" : "already had",
+ reg_mask, stack_mask);
+ }
+
+ if (!new_marks)
+ break;
+
+ last_idx = st->last_insn_idx;
+ first_idx = st->first_insn_idx;
+ }
+ return 0;
+}
+
+
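
Concretely, mark_chain_precision() seeds reg_mask with the one register whose exact value mattered and lets backtrack_insn() transfer that requirement backwards: a mov moves the mark from dst to src, an ALU with a register source adds the source, and an immediate operation leaves the mask unchanged. The toy model below replays the "r9 -= r8; r5 = r9" example from the comment above using only that register-mask logic (stack slots, calls and the other instruction classes are omitted):

/* Toy model of the reg_mask transfer rules used while backtracking. */
#include <stdio.h>

enum { TOY_MOV, TOY_ALU_REG, TOY_ALU_IMM };

struct toy_insn { int op, dst, src; };

static void backtrack(unsigned int *reg_mask, const struct toy_insn *insn)
{
	unsigned int dreg = 1u << insn->dst;
	unsigned int sreg = 1u << insn->src;

	if (!(*reg_mask & dreg))
		return;				/* dst not tracked: nothing to do */

	switch (insn->op) {
	case TOY_MOV:				/* rX = rY */
		*reg_mask &= ~dreg;
		*reg_mask |= sreg;		/* precision is now needed on rY */
		break;
	case TOY_ALU_REG:			/* rX op= rY */
		*reg_mask |= sreg;		/* both rX and rY need precision */
		break;
	case TOY_ALU_IMM:			/* rX op= K: rX stays tracked */
		break;
	}
}

int main(void)
{
	/* Program in forward order: r9 -= r8; r5 = r9; r5 += 1 */
	struct toy_insn prog[] = {
		{ TOY_ALU_REG, 9, 8 },
		{ TOY_MOV,     5, 9 },
		{ TOY_ALU_IMM, 5, 0 },
	};
	unsigned int reg_mask = 1u << 5;	/* r5 was used where a constant bound matters */
	int i;

	for (i = 2; i >= 0; i--)		/* walk the instructions in reverse */
		backtrack(&reg_mask, &prog[i]);

	printf("registers needing precision: 0x%x\n", reg_mask);	/* 0x300: r8 and r9 */
	return 0;
}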
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
@@ -1220,6 +1800,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_SOCK_COMMON_OR_NULL:
case PTR_TO_TCP_SOCK:
case PTR_TO_TCP_SOCK_OR_NULL:
+ case PTR_TO_XDP_SOCK:
return true;
default:
return false;
@@ -1232,6 +1813,23 @@ static bool register_is_null(struct bpf_reg_state *reg)
return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}
+static bool register_is_const(struct bpf_reg_state *reg)
+{
+ return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
+}
+
+static void save_register_state(struct bpf_func_state *state,
+ int spi, struct bpf_reg_state *reg)
+{
+ int i;
+
+ state->stack[spi].spilled_ptr = *reg;
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ state->stack[spi].slot_type[i] = STACK_SPILL;
+}
+
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
@@ -1241,7 +1839,8 @@ static int check_stack_write(struct bpf_verifier_env *env,
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
- enum bpf_reg_type type;
+ u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
+ struct bpf_reg_state *reg = NULL;
err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
state->acquired_refs, true);
@@ -1258,27 +1857,48 @@ static int check_stack_write(struct bpf_verifier_env *env,
}
cur = env->cur_state->frame[env->cur_state->curframe];
- if (value_regno >= 0 &&
- is_spillable_regtype((type = cur->regs[value_regno].type))) {
-
+ if (value_regno >= 0)
+ reg = &cur->regs[value_regno];
+
+ if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
+ !register_is_null(reg) && env->allow_ptr_leaks) {
+ if (dst_reg != BPF_REG_FP) {
+ /* The backtracking logic can only recognize explicit
+ * stack slot address like [fp - 8]. Other spill of
+ * scalar via different register has to be conservative.
+ * Backtrack from here and mark all registers as precise
+ * that contributed into 'reg' being a constant.
+ */
+ err = mark_chain_precision(env, value_regno);
+ if (err)
+ return err;
+ }
+ save_register_state(state, spi, reg);
+ } else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
+ verbose_linfo(env, insn_idx, "; ");
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
- if (state != cur && type == PTR_TO_STACK) {
+ if (state != cur && reg->type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
- /* save register state */
- state->stack[spi].spilled_ptr = cur->regs[value_regno];
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ if (!env->allow_ptr_leaks) {
+ bool sanitize = false;
- for (i = 0; i < BPF_REG_SIZE; i++) {
- if (state->stack[spi].slot_type[i] == STACK_MISC &&
- !env->allow_ptr_leaks) {
+ if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ register_is_const(&state->stack[spi].spilled_ptr))
+ sanitize = true;
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ if (state->stack[spi].slot_type[i] == STACK_MISC) {
+ sanitize = true;
+ break;
+ }
+ if (sanitize) {
int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
int soff = (-spi - 1) * BPF_REG_SIZE;
@@ -1301,8 +1921,8 @@ static int check_stack_write(struct bpf_verifier_env *env,
}
*poff = soff;
}
- state->stack[spi].slot_type[i] = STACK_SPILL;
}
+ save_register_state(state, spi, reg);
} else {
u8 type = STACK_MISC;
@@ -1325,9 +1945,13 @@ static int check_stack_write(struct bpf_verifier_env *env,
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
/* when we zero initialize stack slots mark them as such */
- if (value_regno >= 0 &&
- register_is_null(&cur->regs[value_regno]))
+ if (reg && register_is_null(reg)) {
+ /* backtracking doesn't work for STACK_ZERO yet. */
+ err = mark_chain_precision(env, value_regno);
+ if (err)
+ return err;
type = STACK_ZERO;
+ }
/* Mark slots affected by this stack write. */
for (i = 0; i < size; i++)
@@ -1344,6 +1968,7 @@ static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
+ struct bpf_reg_state *reg;
u8 *stype;
if (reg_state->allocated_stack <= slot) {
@@ -1352,11 +1977,21 @@ static int check_stack_read(struct bpf_verifier_env *env,
return -EACCES;
}
stype = reg_state->stack[spi].slot_type;
+ reg = &reg_state->stack[spi].spilled_ptr;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
- verbose(env, "invalid size of register spill\n");
- return -EACCES;
+ if (reg->type != SCALAR_VALUE) {
+ verbose_linfo(env, env->insn_idx, "; ");
+ verbose(env, "invalid size of register fill\n");
+ return -EACCES;
+ }
+ if (value_regno >= 0) {
+ mark_reg_unknown(env, state->regs, value_regno);
+ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ return 0;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
@@ -1367,16 +2002,14 @@ static int check_stack_read(struct bpf_verifier_env *env,
if (value_regno >= 0) {
/* restore register state from stack */
- state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
+ state->regs[value_regno] = *reg;
/* mark reg as written since spilled pointer state likely
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
- mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
- reg_state->stack[spi].spilled_ptr.parent);
- return 0;
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
int zeros = 0;
@@ -1391,22 +2024,32 @@ static int check_stack_read(struct bpf_verifier_env *env,
off, i, size);
return -EACCES;
}
- mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
- reg_state->stack[spi].spilled_ptr.parent);
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
if (value_regno >= 0) {
if (zeros == size) {
/* any size read into register is zero extended,
* so the whole register == const_zero
*/
__mark_reg_const_zero(&state->regs[value_regno]);
+ /* backtracking doesn't support STACK_ZERO yet,
+ * so mark the register precise here so that later
+ * backtracking can stop at it.
+ * Backtracking may not need this if the register
+ * doesn't participate in pointer adjustment.
+ * Forward propagation of the precise flag is not
+ * necessary either. This mark exists only to stop
+ * backtracking. Any register that contributed
+ * to const 0 was marked precise before the spill.
+ */
+ state->regs[value_regno].precise = true;
} else {
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, value_regno);
}
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
- return 0;
}
+ return 0;
}
static int check_stack_access(struct bpf_verifier_env *env,
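As context for the new spill handling above: a minimal, selftest-style sketch (not part of this patch; a privileged loader, i.e. allow_ptr_leaks, is assumed) of the pattern save_register_state() is meant to preserve, a known constant spilled to an explicit [fp - 8] slot and filled back later without being degraded to an unknown scalar:

struct bpf_insn const_spill_fill[] = {
        /* r0 = 42; the verifier tracks r0 as a known constant */
        BPF_MOV64_IMM(BPF_REG_0, 42),
        /* spill to an explicit fp-relative slot: *(u64 *)(fp - 8) = r0 */
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
        /* clobber r0, then fill it back from the same slot */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
        /* as far as the verifier is concerned, r0 == 42 again here */
        BPF_EXIT_INSN(),
};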
@@ -1698,6 +2341,9 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
case PTR_TO_TCP_SOCK:
valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
break;
+ case PTR_TO_XDP_SOCK:
+ valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
+ break;
default:
valid = false;
}
@@ -1862,6 +2508,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
case PTR_TO_TCP_SOCK:
pointer_desc = "tcp_sock ";
break;
+ case PTR_TO_XDP_SOCK:
+ pointer_desc = "xdp_sock ";
+ break;
default:
break;
}
@@ -2101,6 +2750,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
value_regno);
if (reg_type_may_be_null(reg_type))
regs[value_regno].id = ++env->id_gen;
+ /* A load of a ctx field could have an actual load size
+ * different from the one encoded in the insn. When the dst
+ * is a PTR, it is definitely not a sub-register.
+ */
+ regs[value_regno].subreg_def = DEF_NOT_SUBREG;
}
regs[value_regno].type = reg_type;
}
@@ -2255,7 +2910,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
{
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
- int err, min_off, max_off, i, slot, spi;
+ int err, min_off, max_off, i, j, slot, spi;
if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
@@ -2343,6 +2998,14 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
*stype = STACK_MISC;
goto mark;
}
+ if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
+ __mark_reg_unknown(&state->stack[spi].spilled_ptr);
+ for (j = 0; j < BPF_REG_SIZE; j++)
+ state->stack[spi].slot_type[j] = STACK_MISC;
+ goto mark;
+ }
+
err:
if (tnum_is_const(reg->var_off)) {
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
@@ -2360,7 +3023,8 @@ mark:
* the whole slot to be marked as 'read'
*/
mark_reg_read(env, &state->stack[spi].spilled_ptr,
- state->stack[spi].spilled_ptr.parent);
+ state->stack[spi].spilled_ptr.parent,
+ REG_LIVE_READ64);
}
return update_stack_depth(env, state, min_off);
}
@@ -2693,6 +3357,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_helper_mem_access(env, regno - 1,
reg->umax_value,
zero_size_allowed, meta);
+ if (!err)
+ err = mark_chain_precision(env, regno);
} else if (arg_type_is_int_ptr(arg_type)) {
int size = int_ptr_type_to_size(arg_type);
@@ -2753,10 +3419,14 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
* appear.
*/
case BPF_MAP_TYPE_CPUMAP:
- case BPF_MAP_TYPE_XSKMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
+ case BPF_MAP_TYPE_XSKMAP:
+ if (func_id != BPF_FUNC_redirect_map &&
+ func_id != BPF_FUNC_map_lookup_elem)
+ goto error;
+ break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
@@ -3324,6 +3994,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
+ /* helper call returns 64-bit value. */
+ regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
+
/* update return register (already marked as written above) */
if (fn->ret_type == RET_INTEGER) {
/* sets type to SCALAR_VALUE */
@@ -3644,6 +4317,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
case PTR_TO_SOCK_COMMON_OR_NULL:
case PTR_TO_TCP_SOCK:
case PTR_TO_TCP_SOCK_OR_NULL:
+ case PTR_TO_XDP_SOCK:
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
@@ -4121,6 +4795,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
+ int err;
dst_reg = &regs[insn->dst_reg];
src_reg = NULL;
@@ -4147,11 +4822,17 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
* This is legal, but we have to reverse our
* src/dest handling in computing the range
*/
+ err = mark_chain_precision(env, insn->dst_reg);
+ if (err)
+ return err;
return adjust_ptr_min_max_vals(env, insn,
src_reg, dst_reg);
}
} else if (ptr_reg) {
/* pointer += scalar */
+ err = mark_chain_precision(env, insn->src_reg);
+ if (err)
+ return err;
return adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
}
@@ -4255,6 +4936,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
*/
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
+ dst_reg->subreg_def = DEF_NOT_SUBREG;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
@@ -4265,6 +4947,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else if (src_reg->type == SCALAR_VALUE) {
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
+ dst_reg->subreg_def = env->insn_idx + 1;
} else {
mark_reg_unknown(env, regs,
insn->dst_reg);
@@ -4881,6 +5564,9 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
if (reg->map_ptr->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = reg->map_ptr->inner_map_meta;
+ } else if (reg->map_ptr->map_type ==
+ BPF_MAP_TYPE_XSKMAP) {
+ reg->type = PTR_TO_XDP_SOCK;
} else {
reg->type = PTR_TO_MAP_VALUE;
}
@@ -5052,9 +5738,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_verifier_state *this_branch = env->cur_state;
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
- struct bpf_reg_state *dst_reg, *other_branch_regs;
+ struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
+ int pred = -1;
int err;
/* Only conditional jumps are expected to reach here. */
@@ -5079,6 +5766,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
insn->src_reg);
return -EACCES;
}
+ src_reg = &regs[insn->src_reg];
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -5094,20 +5782,29 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- if (BPF_SRC(insn->code) == BPF_K) {
- int pred = is_branch_taken(dst_reg, insn->imm, opcode,
- is_jmp32);
-
- if (pred == 1) {
- /* only follow the goto, ignore fall-through */
- *insn_idx += insn->off;
- return 0;
- } else if (pred == 0) {
- /* only follow fall-through branch, since
- * that's where the program will go
- */
- return 0;
- }
+ if (BPF_SRC(insn->code) == BPF_K)
+ pred = is_branch_taken(dst_reg, insn->imm,
+ opcode, is_jmp32);
+ else if (src_reg->type == SCALAR_VALUE &&
+ tnum_is_const(src_reg->var_off))
+ pred = is_branch_taken(dst_reg, src_reg->var_off.value,
+ opcode, is_jmp32);
+ if (pred >= 0) {
+ err = mark_chain_precision(env, insn->dst_reg);
+ if (BPF_SRC(insn->code) == BPF_X && !err)
+ err = mark_chain_precision(env, insn->src_reg);
+ if (err)
+ return err;
+ }
+ if (pred == 1) {
+ /* only follow the goto, ignore fall-through */
+ *insn_idx += insn->off;
+ return 0;
+ } else if (pred == 0) {
+ /* only follow fall-through branch, since
+ * that's where the program will go
+ */
+ return 0;
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
@@ -5344,11 +6041,14 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
* Already marked as written above.
*/
mark_reg_unknown(env, regs, BPF_REG_0);
+ /* ld_abs loads up to 32 bits of skb data. */
+ regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
return 0;
}
static int check_return_code(struct bpf_verifier_env *env)
{
+ struct tnum enforce_attach_type_range = tnum_unknown;
struct bpf_reg_state *reg;
struct tnum range = tnum_range(0, 1);
@@ -5358,6 +6058,10 @@ static int check_return_code(struct bpf_verifier_env *env)
env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
range = tnum_range(1, 1);
case BPF_PROG_TYPE_CGROUP_SKB:
+ if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
+ range = tnum_range(0, 3);
+ enforce_attach_type_range = tnum_range(2, 3);
+ }
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
@@ -5388,6 +6092,10 @@ static int check_return_code(struct bpf_verifier_env *env)
verbose(env, " should have been in %s\n", tn_buf);
return -EINVAL;
}
+
+ if (!tnum_is_unknown(enforce_attach_type_range) &&
+ tnum_in(enforce_attach_type_range, reg->var_off))
+ env->prog->enforce_expected_attach_type = 1;
return 0;
}
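A rough BPF C sketch of what the widened range enables (the section name, SEC() macro and license section are assumptions of this example; how the stack interprets the new codes 2 and 3 is outside this diff): a cgroup/skb program attached as BPF_CGROUP_INET_EGRESS may now return any value in [0, 3], and returning 2 or 3 sets enforce_expected_attach_type so the program can only be attached at egress.

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("cgroup_skb/egress")
int egress_prog(struct __sk_buff *skb)
{
        /* 0 = drop, 1 = allow; 2 and 3 are only accepted for egress programs */
        return 1;
}

char _license[] SEC("license") = "GPL";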
@@ -5431,14 +6139,33 @@ enum {
BRANCH = 2,
};
-#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
+static u32 state_htab_size(struct bpf_verifier_env *env)
+{
+ return env->prog->len;
+}
+
+static struct bpf_verifier_state_list **explored_state(
+ struct bpf_verifier_env *env,
+ int idx)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+ struct bpf_func_state *state = cur->frame[cur->curframe];
+
+ return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
+}
+
+static void init_explored_state(struct bpf_verifier_env *env, int idx)
+{
+ env->insn_aux_data[idx].prune_point = true;
+}
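The explored_states array now behaves like a small hash table keyed on both the instruction index and the call site. A hypothetical helper (illustration only, not part of the patch) spelling out the same bucket computation as explored_state():

/* states for the same prune point reached from different call sites
 * land in different buckets, which keeps each list short.
 */
static u32 state_bucket(u32 insn_idx, u32 callsite, u32 prog_len)
{
        return (insn_idx ^ callsite) % prog_len;
}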
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
* e - edge
*/
-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+ bool loop_ok)
{
int *insn_stack = env->cfg.insn_stack;
int *insn_state = env->cfg.insn_state;
@@ -5457,7 +6184,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
if (e == BRANCH)
/* mark branch target for state pruning */
- env->explored_states[w] = STATE_LIST_MARK;
+ init_explored_state(env, w);
if (insn_state[w] == 0) {
/* tree-edge */
@@ -5468,6 +6195,8 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
insn_stack[env->cfg.cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
+ if (loop_ok && env->allow_ptr_leaks)
+ return 0;
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
@@ -5519,16 +6248,17 @@ peek_stack:
if (opcode == BPF_EXIT) {
goto mark_explored;
} else if (opcode == BPF_CALL) {
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
+ ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
if (t + 1 < insn_cnt)
- env->explored_states[t + 1] = STATE_LIST_MARK;
+ init_explored_state(env, t + 1);
if (insns[t].src_reg == BPF_PSEUDO_CALL) {
- env->explored_states[t] = STATE_LIST_MARK;
- ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+ init_explored_state(env, t);
+ ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
+ env, false);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
@@ -5541,26 +6271,31 @@ peek_stack:
}
/* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1,
- FALLTHROUGH, env);
+ FALLTHROUGH, env, true);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
+ /* unconditional jmp is not a good pruning point,
+ * but it's marked, since backtracking needs
+ * to record jmp history in is_state_visited().
+ */
+ init_explored_state(env, t + insns[t].off + 1);
/* tell verifier to check for equivalent states
* after every call and jump
*/
if (t + 1 < insn_cnt)
- env->explored_states[t + 1] = STATE_LIST_MARK;
+ init_explored_state(env, t + 1);
} else {
/* conditional jump with two edges */
- env->explored_states[t] = STATE_LIST_MARK;
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
+ init_explored_state(env, t);
+ ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
- ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
+ ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
@@ -5570,7 +6305,7 @@ peek_stack:
/* all other non-branch instructions with single
* fall-through edge
*/
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
+ ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
@@ -6001,12 +6736,12 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
struct bpf_verifier_state_list *sl;
int i;
- sl = env->explored_states[insn];
- if (!sl)
- return;
-
- while (sl != STATE_LIST_MARK) {
- if (sl->state.curframe != cur->curframe)
+ sl = *explored_state(env, insn);
+ while (sl) {
+ if (sl->state.branches)
+ goto next;
+ if (sl->state.insn_idx != insn ||
+ sl->state.curframe != cur->curframe)
goto next;
for (i = 0; i <= cur->curframe; i++)
if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
@@ -6046,6 +6781,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
switch (rold->type) {
case SCALAR_VALUE:
if (rcur->type == SCALAR_VALUE) {
+ if (!rold->precise && !rcur->precise)
+ return true;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
@@ -6118,6 +6855,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
case PTR_TO_SOCK_COMMON_OR_NULL:
case PTR_TO_TCP_SOCK:
case PTR_TO_TCP_SOCK_OR_NULL:
+ case PTR_TO_XDP_SOCK:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
@@ -6288,20 +7026,33 @@ static bool states_equal(struct bpf_verifier_env *env,
return true;
}
+/* Return 0 if no propagation happened. Return a negative error code on
+ * error. Otherwise, return the propagated bit.
+ */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
struct bpf_reg_state *parent_reg)
{
+ u8 parent_flag = parent_reg->live & REG_LIVE_READ;
+ u8 flag = reg->live & REG_LIVE_READ;
int err;
- if (parent_reg->live & REG_LIVE_READ || !(reg->live & REG_LIVE_READ))
+ /* By the time we get here, the read flags of PARENT_REG or REG could be
+ * any of REG_LIVE_READ64, REG_LIVE_READ32 or REG_LIVE_NONE. There is no
+ * need for propagation if PARENT_REG already has the strongest flag,
+ * REG_LIVE_READ64.
+ */
+ if (parent_flag == REG_LIVE_READ64 ||
+ /* Or if there is no read flag from REG. */
+ !flag ||
+ /* Or if the read flag from REG is the same as PARENT_REG. */
+ parent_flag == flag)
return 0;
- err = mark_reg_read(env, reg, parent_reg);
+ err = mark_reg_read(env, reg, parent_reg, flag);
if (err)
return err;
- return 0;
+ return flag;
}
/* A write screens off any subsequent reads; but write marks come from the
@@ -6335,8 +7086,10 @@ static int propagate_liveness(struct bpf_verifier_env *env,
for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
err = propagate_liveness_reg(env, &state_reg[i],
&parent_reg[i]);
- if (err)
+ if (err < 0)
return err;
+ if (err == REG_LIVE_READ64)
+ mark_insn_zext(env, &parent_reg[i]);
}
/* Propagate stack slots. */
@@ -6346,32 +7099,92 @@ static int propagate_liveness(struct bpf_verifier_env *env,
state_reg = &state->stack[i].spilled_ptr;
err = propagate_liveness_reg(env, state_reg,
parent_reg);
- if (err)
+ if (err < 0)
return err;
}
}
- return err;
+ return 0;
+}
+
+static bool states_maybe_looping(struct bpf_verifier_state *old,
+ struct bpf_verifier_state *cur)
+{
+ struct bpf_func_state *fold, *fcur;
+ int i, fr = cur->curframe;
+
+ if (old->curframe != fr)
+ return false;
+
+ fold = old->frame[fr];
+ fcur = cur->frame[fr];
+ for (i = 0; i < MAX_BPF_REG; i++)
+ if (memcmp(&fold->regs[i], &fcur->regs[i],
+ offsetof(struct bpf_reg_state, parent)))
+ return false;
+ return true;
}
+
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl, **pprev;
struct bpf_verifier_state *cur = env->cur_state, *new;
int i, j, err, states_cnt = 0;
+ bool add_new_state = false;
- pprev = &env->explored_states[insn_idx];
- sl = *pprev;
-
- if (!sl)
+ cur->last_insn_idx = env->prev_insn_idx;
+ if (!env->insn_aux_data[insn_idx].prune_point)
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
return 0;
+ /* bpf progs typically have a pruning point every 4 instructions
+ * http://vger.kernel.org/bpfconf2019.html#session-1
+ * Do not add a new state for future pruning if the verifier hasn't seen
+ * at least 2 jumps and at least 8 instructions.
+ * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
+ * In tests that amounts to up to a 50% reduction in total verifier
+ * memory consumption and a 20% verifier time speedup.
+ */
+ if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
+ env->insn_processed - env->prev_insn_processed >= 8)
+ add_new_state = true;
+
+ pprev = explored_state(env, insn_idx);
+ sl = *pprev;
+
clean_live_states(env, insn_idx, cur);
- while (sl != STATE_LIST_MARK) {
+ while (sl) {
+ states_cnt++;
+ if (sl->state.insn_idx != insn_idx)
+ goto next;
+ if (sl->state.branches) {
+ if (states_maybe_looping(&sl->state, cur) &&
+ states_equal(env, &sl->state, cur)) {
+ verbose_linfo(env, insn_idx, "; ");
+ verbose(env, "infinite loop detected at insn %d\n", insn_idx);
+ return -EINVAL;
+ }
+ /* if the verifier is processing a loop, avoid adding new state
+ * too often, since different loop iterations have distinct
+ * states and may not help future pruning.
+ * This threshold shouldn't be too low to make sure that
+ * a loop with large bound will be rejected quickly.
+ * The most abusive loop will be:
+ * r1 += 1
+ * if r1 < 1000000 goto pc-2
+ * 1M insn_processed limit / 100 == 10k peak states.
+ * This threshold shouldn't be too high either, since states
+ * at the end of the loop are likely to be useful in pruning.
+ */
+ if (env->jmps_processed - env->prev_jmps_processed < 20 &&
+ env->insn_processed - env->prev_insn_processed < 100)
+ add_new_state = false;
+ goto miss;
+ }
if (states_equal(env, &sl->state, cur)) {
sl->hit_cnt++;
/* reached equivalent register/stack state,
@@ -6389,8 +7202,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
return err;
return 1;
}
- states_cnt++;
- sl->miss_cnt++;
+miss:
+ /* When a new state is not going to be added, do not increase the miss count.
+ * Otherwise several loop iterations will remove the state
+ * recorded earlier. The goal of these heuristics is to have
+ * states from some iterations of the loop (some in the beginning
+ * and some at the end) to help pruning.
+ */
+ if (add_new_state)
+ sl->miss_cnt++;
/* heuristic to determine whether this state is beneficial
* to keep checking from state equivalence point of view.
* Higher numbers increase max_states_per_insn and verification time,
@@ -6402,6 +7222,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
*/
*pprev = sl->next;
if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
+ u32 br = sl->state.branches;
+
+ WARN_ONCE(br,
+ "BUG live_done but branches_to_explore %d\n",
+ br);
free_verifier_state(&sl->state, false);
kfree(sl);
env->peak_states--;
@@ -6416,6 +7241,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
sl = *pprev;
continue;
}
+next:
pprev = &sl->next;
sl = *pprev;
}
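With branch counting and the loop heuristics above, a simple bounded loop like the sketch below (illustration only; a privileged loader, i.e. allow_ptr_leaks, is assumed because push_insn() still rejects back-edges for unprivileged programs) can be verified instead of being rejected outright:

/* each iteration is a distinct state; the heuristics above keep only
 * some of them, and a state is not used for pruning until its branch
 * count drops back to zero.
 */
int sum_first_64(void *ctx)
{
        int i, sum = 0;

        for (i = 0; i < 64; i++)
                sum += i;
        return sum & 1;
}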
@@ -6424,20 +7250,27 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
env->max_states_per_insn = states_cnt;
if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
- return 0;
+ return push_jmp_history(env, cur);
- /* there were no equivalent states, remember current one.
- * technically the current state is not proven to be safe yet,
+ if (!add_new_state)
+ return push_jmp_history(env, cur);
+
+ /* There were no equivalent states, remember the current one.
+ * Technically the current state is not proven to be safe yet,
* but it will either reach outer most bpf_exit (which means it's safe)
- * or it will be rejected. Since there are no loops, we won't be
+ * or it will be rejected. When there are no loops the verifier won't be
* seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
- * again on the way to bpf_exit
+ * again on the way to bpf_exit.
+ * When looping the sl->state.branches will be > 0 and this state
+ * will not be considered for equivalence until branches == 0.
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
return -ENOMEM;
env->total_states++;
env->peak_states++;
+ env->prev_jmps_processed = env->jmps_processed;
+ env->prev_insn_processed = env->insn_processed;
/* add new state to the head of linked list */
new = &new_sl->state;
@@ -6447,8 +7280,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
kfree(new_sl);
return err;
}
- new_sl->next = env->explored_states[insn_idx];
- env->explored_states[insn_idx] = new_sl;
+ new->insn_idx = insn_idx;
+ WARN_ONCE(new->branches != 1,
+ "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
+
+ cur->parent = new;
+ cur->first_insn_idx = insn_idx;
+ clear_jmp_history(cur);
+ new_sl->next = *explored_state(env, insn_idx);
+ *explored_state(env, insn_idx) = new_sl;
/* connect new state to parentage chain. Current frame needs all
* registers connected. Only r6 - r9 of the callers are alive (pushed
* to the stack implicitly by JITs) so in callers' frames connect just
@@ -6456,17 +7296,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* the state of the call instruction (with WRITTEN set), and r0 comes
* from callee with its full parentage chain, anyway.
*/
- for (j = 0; j <= cur->curframe; j++)
- for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
- cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
* their parent and current state never has children yet. Only
* explored_states can get read marks.)
*/
- for (i = 0; i < BPF_REG_FP; i++)
- cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
+ for (j = 0; j <= cur->curframe; j++) {
+ for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+ cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
+ for (i = 0; i < BPF_REG_FP; i++)
+ cur->frame[j]->regs[i].live = REG_LIVE_NONE;
+ }
/* all stack frames are accessible from callee, clear them all */
for (j = 0; j <= cur->curframe; j++) {
@@ -6493,6 +7334,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
case PTR_TO_SOCK_COMMON_OR_NULL:
case PTR_TO_TCP_SOCK:
case PTR_TO_TCP_SOCK_OR_NULL:
+ case PTR_TO_XDP_SOCK:
return false;
default:
return true;
@@ -6524,6 +7366,7 @@ static int do_check(struct bpf_verifier_env *env)
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len;
bool do_print_state = false;
+ int prev_insn_idx = -1;
env->prev_linfo = NULL;
@@ -6532,6 +7375,7 @@ static int do_check(struct bpf_verifier_env *env)
return -ENOMEM;
state->curframe = 0;
state->speculative = false;
+ state->branches = 1;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
if (!state->frame[0]) {
kfree(state);
@@ -6548,6 +7392,7 @@ static int do_check(struct bpf_verifier_env *env)
u8 class;
int err;
+ env->prev_insn_idx = prev_insn_idx;
if (env->insn_idx >= insn_cnt) {
verbose(env, "invalid insn idx %d insn_cnt %d\n",
env->insn_idx, insn_cnt);
@@ -6620,6 +7465,7 @@ static int do_check(struct bpf_verifier_env *env)
regs = cur_regs(env);
env->insn_aux_data[env->insn_idx].seen = true;
+ prev_insn_idx = env->insn_idx;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
@@ -6738,6 +7584,7 @@ static int do_check(struct bpf_verifier_env *env)
} else if (class == BPF_JMP || class == BPF_JMP32) {
u8 opcode = BPF_OP(insn->code);
+ env->jmps_processed++;
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
@@ -6792,7 +7639,6 @@ static int do_check(struct bpf_verifier_env *env)
if (state->curframe) {
/* exit from nested function */
- env->prev_insn_idx = env->insn_idx;
err = prepare_func_exit(env, &env->insn_idx);
if (err)
return err;
@@ -6823,7 +7669,8 @@ static int do_check(struct bpf_verifier_env *env)
if (err)
return err;
process_bpf_exit:
- err = pop_stack(env, &env->prev_insn_idx,
+ update_branch_counts(env, env->cur_state);
+ err = pop_stack(env, &prev_insn_idx,
&env->insn_idx);
if (err < 0) {
if (err != -ENOENT)
@@ -7126,14 +7973,23 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
* insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
* [0, off) and [off, end) to new locations, so the patched range stays zero
*/
-static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
- u32 off, u32 cnt)
+static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+ struct bpf_prog *new_prog, u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ struct bpf_insn *insn = new_prog->insnsi;
+ u32 prog_len;
int i;
+ /* The aux info at OFF always needs adjustment, no matter whether the
+ * fast path (cnt == 1) is taken or not. There is no guarantee that the
+ * insn at OFF is still the original insn of the old prog.
+ */
+ old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
+
if (cnt == 1)
return 0;
+ prog_len = new_prog->len;
new_data = vzalloc(array_size(prog_len,
sizeof(struct bpf_insn_aux_data)));
if (!new_data)
@@ -7141,8 +7997,10 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
- for (i = off; i < off + cnt - 1; i++)
+ for (i = off; i < off + cnt - 1; i++) {
new_data[i].seen = true;
+ new_data[i].zext_dst = insn_has_def32(env, insn + i);
+ }
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@@ -7175,7 +8033,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
env->insn_aux_data[off].orig_idx);
return NULL;
}
- if (adjust_insn_aux_data(env, new_prog->len, off, len))
+ if (adjust_insn_aux_data(env, new_prog, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
return new_prog;
@@ -7439,6 +8297,84 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
return 0;
}
+static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
+ const union bpf_attr *attr)
+{
+ struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
+ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ int i, patch_len, delta = 0, len = env->prog->len;
+ struct bpf_insn *insns = env->prog->insnsi;
+ struct bpf_prog *new_prog;
+ bool rnd_hi32;
+
+ rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
+ zext_patch[1] = BPF_ZEXT_REG(0);
+ rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
+ rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
+ rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
+ for (i = 0; i < len; i++) {
+ int adj_idx = i + delta;
+ struct bpf_insn insn;
+
+ insn = insns[adj_idx];
+ if (!aux[adj_idx].zext_dst) {
+ u8 code, class;
+ u32 imm_rnd;
+
+ if (!rnd_hi32)
+ continue;
+
+ code = insn.code;
+ class = BPF_CLASS(code);
+ if (insn_no_def(&insn))
+ continue;
+
+ /* NOTE: arg "reg" (the fourth one) is only used for
+ * BPF_STX, which has been ruled out by the check
+ * above, so it is safe to pass NULL here.
+ */
+ if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
+ if (class == BPF_LD &&
+ BPF_MODE(code) == BPF_IMM)
+ i++;
+ continue;
+ }
+
+ /* a ctx load could be transformed into a wider load. */
+ if (class == BPF_LDX &&
+ aux[adj_idx].ptr_type == PTR_TO_CTX)
+ continue;
+
+ imm_rnd = get_random_int();
+ rnd_hi32_patch[0] = insn;
+ rnd_hi32_patch[1].imm = imm_rnd;
+ rnd_hi32_patch[3].dst_reg = insn.dst_reg;
+ patch = rnd_hi32_patch;
+ patch_len = 4;
+ goto apply_patch_buffer;
+ }
+
+ if (!bpf_jit_needs_zext())
+ continue;
+
+ zext_patch[0] = insn;
+ zext_patch[1].dst_reg = insn.dst_reg;
+ zext_patch[1].src_reg = insn.dst_reg;
+ patch = zext_patch;
+ patch_len = 2;
+apply_patch_buffer:
+ new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = new_prog;
+ insns = new_prog->insnsi;
+ aux = env->insn_aux_data;
+ delta += patch_len - 1;
+ }
+
+ return 0;
+}
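To illustrate what the patch buffers above emit (a sketch, not code from the patch): a 32-bit definition such as w2 = w1 becomes the original insn followed by the new BPF_ZEXT_REG pseudo insn, so a JIT that reports bpf_jit_needs_zext() no longer has to zero-extend every 32-bit destination itself; with BPF_F_TEST_RND_HI32 the four-insn rnd_hi32_patch is used instead to poison the high half.

struct bpf_insn zext_example[] = {
        BPF_MOV32_REG(BPF_REG_2, BPF_REG_1),    /* original 32-bit def: w2 = w1 */
        BPF_ZEXT_REG(BPF_REG_2),                /* inserted: zero-extend the high 32 bits of r2 */
};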
+
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
@@ -7537,6 +8473,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
case PTR_TO_TCP_SOCK:
convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
break;
+ case PTR_TO_XDP_SOCK:
+ convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
+ break;
default:
continue;
}
@@ -8126,16 +9065,15 @@ static void free_states(struct bpf_verifier_env *env)
if (!env->explored_states)
return;
- for (i = 0; i < env->prog->len; i++) {
+ for (i = 0; i < state_htab_size(env); i++) {
sl = env->explored_states[i];
- if (sl)
- while (sl != STATE_LIST_MARK) {
- sln = sl->next;
- free_verifier_state(&sl->state, false);
- kfree(sl);
- sl = sln;
- }
+ while (sl) {
+ sln = sl->next;
+ free_verifier_state(&sl->state, false);
+ kfree(sl);
+ sl = sln;
+ }
}
kvfree(env->explored_states);
@@ -8235,7 +9173,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
goto skip_full_check;
}
- env->explored_states = kvcalloc(env->prog->len,
+ env->explored_states = kvcalloc(state_htab_size(env),
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
@@ -8290,6 +9228,15 @@ skip_full_check:
if (ret == 0)
ret = fixup_bpf_calls(env);
+ /* do the 32-bit optimization after insn patching is done, so that those
+ * patched insns can be handled correctly.
+ */
+ if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
+ ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
+ env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
+ : false;
+ }
+
if (ret == 0)
ret = fixup_call_args(env);
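For completeness, a rough user-space sketch (raw bpf(2) syscall, no libbpf; program instructions and error handling elided) of how the BPF_F_TEST_RND_HI32 flag consumed by opt_subreg_zext_lo32_rnd_hi32() would be passed at load time on a kernel that carries this change:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int load_with_rnd_hi32(const struct bpf_insn *insns, unsigned int insn_cnt)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns = (unsigned long)insns;
        attr.insn_cnt = insn_cnt;
        attr.license = (unsigned long)"GPL";
        /* ask the verifier to poison the high 32 bits of 32-bit defs */
        attr.prog_flags = BPF_F_TEST_RND_HI32;

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}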
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 686d244e798d..ef7338cebd18 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -17,8 +17,8 @@ struct xsk_map {
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
- int cpu, err = -EINVAL;
struct xsk_map *m;
+ int cpu, err;
u64 cost;
if (!capable(CAP_NET_ADMIN))
@@ -37,13 +37,9 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
cost += sizeof(struct list_head) * num_possible_cpus();
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_m;
-
- m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* Notice: returns -EPERM if map size is larger than memlock limit */
- err = bpf_map_precharge_memlock(m->map.pages);
+ err = bpf_map_charge_init(&m->map.memory, cost);
if (err)
goto free_m;
@@ -51,7 +47,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
m->flush_list = alloc_percpu(struct list_head);
if (!m->flush_list)
- goto free_m;
+ goto free_charge;
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
@@ -65,6 +61,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
free_percpu:
free_percpu(m->flush_list);
+free_charge:
+ bpf_map_charge_finish(&m->map.memory);
free_m:
kfree(m);
return ERR_PTR(err);
@@ -154,6 +152,12 @@ void __xsk_map_flush(struct bpf_map *map)
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return __xsk_map_lookup_elem(map, *(u32 *)key);
+}
+
+static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
+{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -220,6 +224,7 @@ const struct bpf_map_ops xsk_map_ops = {
.map_free = xsk_map_free,
.map_get_next_key = xsk_map_get_next_key,
.map_lookup_elem = xsk_map_lookup_elem,
+ .map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
.map_check_btf = map_check_no_btf,
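With BPF_FUNC_map_lookup_elem now allowed on BPF_MAP_TYPE_XSKMAP inside programs (the syscall-side lookup above still returns -EOPNOTSUPP), an XDP program can test whether an AF_XDP socket is bound to the receive queue before redirecting. A sketch in the old libbpf "maps"-section style; the map name, section names and hand-rolled helper declarations are assumptions of this example:

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
        (void *)BPF_FUNC_map_lookup_elem;
static int (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) =
        (void *)BPF_FUNC_redirect_map;

struct bpf_map_def {
        unsigned int type;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
};

struct bpf_map_def SEC("maps") xsks_map = {
        .type           = BPF_MAP_TYPE_XSKMAP,
        .key_size       = sizeof(__u32),
        .value_size     = sizeof(__u32),
        .max_entries    = 64,
};

SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx)
{
        __u32 qid = ctx->rx_queue_index;

        /* in-program lookup: non-NULL means an AF_XDP socket is bound here */
        if (!bpf_map_lookup_elem(&xsks_map, &qid))
                return XDP_PASS;
        return bpf_redirect_map(&xsks_map, qid, 0);
}

char _license[] SEC("license") = "GPL";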
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index bf9dbffd46b1..daa567728251 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5005,8 +5005,6 @@ static void css_release_work_fn(struct work_struct *work)
if (cgrp->kn)
RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
NULL);
-
- cgroup_bpf_put(cgrp);
}
mutex_unlock(&cgroup_mutex);
@@ -5532,6 +5530,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
cgroup1_check_for_release(parent);
+ cgroup_bpf_offline(cgrp);
+
/* put the base reference */
percpu_ref_kill(&cgrp->self.refcnt);
@@ -6279,6 +6279,7 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
* Don't use cgroup_get_live().
*/
cgroup_get(sock_cgroup_ptr(skcd));
+ cgroup_bpf_get(sock_cgroup_ptr(skcd));
return;
}
@@ -6290,6 +6291,7 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
skcd->val = (unsigned long)cset->dfl_cgrp;
+ cgroup_bpf_get(cset->dfl_cgrp);
break;
}
cpu_relax();
@@ -6300,7 +6302,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
- cgroup_put(sock_cgroup_ptr(skcd));
+ struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+
+ cgroup_bpf_put(cgrp);
+ cgroup_put(cgrp);
}
#endif /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 1c9a4745e596..c102c240bb0b 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -19,6 +19,9 @@
#include "trace_probe.h"
#include "trace.h"
+#define bpf_event_rcu_dereference(p) \
+ rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
+
#ifdef CONFIG_MODULES
struct bpf_trace_module {
struct module *module;
@@ -591,6 +594,69 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
.arg3_type = ARG_ANYTHING,
};
+struct send_signal_irq_work {
+ struct irq_work irq_work;
+ struct task_struct *task;
+ u32 sig;
+};
+
+static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
+
+static void do_bpf_send_signal(struct irq_work *entry)
+{
+ struct send_signal_irq_work *work;
+
+ work = container_of(entry, struct send_signal_irq_work, irq_work);
+ group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
+}
+
+BPF_CALL_1(bpf_send_signal, u32, sig)
+{
+ struct send_signal_irq_work *work = NULL;
+
+ /* Similar to bpf_probe_write_user, the task needs to be
+ * in a sound condition and kernel memory access must be
+ * permitted in order to send a signal to the current
+ * task.
+ */
+ if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
+ return -EPERM;
+ if (unlikely(uaccess_kernel()))
+ return -EPERM;
+ if (unlikely(!nmi_uaccess_okay()))
+ return -EPERM;
+
+ if (in_nmi()) {
+ /* Do an early check on signal validity. Otherwise,
+ * the error is lost in the deferred irq_work.
+ */
+ if (unlikely(!valid_signal(sig)))
+ return -EINVAL;
+
+ work = this_cpu_ptr(&send_signal_work);
+ if (work->irq_work.flags & IRQ_WORK_BUSY)
+ return -EBUSY;
+
+ /* Add the current task, which is the target of the signal,
+ * to the irq_work. The current task may change by the time
+ * the queued irq_work gets executed.
+ */
+ work->task = current;
+ work->sig = sig;
+ irq_work_queue(&work->irq_work);
+ return 0;
+ }
+
+ return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
+}
+
+static const struct bpf_func_proto bpf_send_signal_proto = {
+ .func = bpf_send_signal,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -641,6 +707,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
#endif
+ case BPF_FUNC_send_signal:
+ return &bpf_send_signal_proto;
default:
return NULL;
}
@@ -1102,7 +1170,7 @@ static DEFINE_MUTEX(bpf_event_mutex);
int perf_event_attach_bpf_prog(struct perf_event *event,
struct bpf_prog *prog)
{
- struct bpf_prog_array __rcu *old_array;
+ struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
int ret = -EEXIST;
@@ -1120,7 +1188,7 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
if (event->prog)
goto unlock;
- old_array = event->tp_event->prog_array;
+ old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
if (old_array &&
bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
ret = -E2BIG;
@@ -1143,7 +1211,7 @@ unlock:
void perf_event_detach_bpf_prog(struct perf_event *event)
{
- struct bpf_prog_array __rcu *old_array;
+ struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
int ret;
@@ -1152,7 +1220,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
if (!event->prog)
goto unlock;
- old_array = event->tp_event->prog_array;
+ old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
if (ret == -ENOENT)
goto unlock;
@@ -1174,6 +1242,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
struct perf_event_query_bpf __user *uquery = info;
struct perf_event_query_bpf query = {};
+ struct bpf_prog_array *progs;
u32 *ids, prog_cnt, ids_len;
int ret;
@@ -1198,10 +1267,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
*/
mutex_lock(&bpf_event_mutex);
- ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
- ids,
- ids_len,
- &prog_cnt);
+ progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
+ ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
mutex_unlock(&bpf_event_mutex);
if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
@@ -1411,5 +1478,18 @@ static int __init bpf_event_init(void)
return 0;
}
+static int __init send_signal_irq_work_init(void)
+{
+ int cpu;
+ struct send_signal_irq_work *work;
+
+ for_each_possible_cpu(cpu) {
+ work = per_cpu_ptr(&send_signal_work, cpu);
+ init_irq_work(&work->irq_work, do_bpf_send_signal);
+ }
+ return 0;
+}
+
fs_initcall(bpf_event_init);
+subsys_initcall(send_signal_irq_work_init);
#endif /* CONFIG_MODULES */
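Finally, a minimal sketch of a tracing program exercising the new bpf_send_signal() helper wired up above (the section name and hand-rolled helper declaration are assumptions of this example; signal 10 is SIGUSR1 on common architectures):

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static int (*bpf_send_signal)(__u32 sig) = (void *)BPF_FUNC_send_signal;

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int notify_nanosleep(void *ctx)
{
        /* deliver SIGUSR1 to the current task; deferred via irq_work in NMI */
        bpf_send_signal(10);
        return 0;
}

char _license[] SEC("license") = "GPL";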