Diffstat (limited to 'kernel/bpf')
-rw-r--r--   kernel/bpf/btf.c        30
-rw-r--r--   kernel/bpf/devmap.c      7
-rw-r--r--   kernel/bpf/hashtab.c    16
-rw-r--r--   kernel/bpf/sockmap.c    43
-rw-r--r--   kernel/bpf/syscall.c     4
-rw-r--r--   kernel/bpf/verifier.c   11
6 files changed, 69 insertions(+), 42 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 2d49d18b793a..e016ac3afa24 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -991,16 +991,13 @@ static void btf_int_bits_seq_show(const struct btf *btf,
void *data, u8 bits_offset,
struct seq_file *m)
{
+ u16 left_shift_bits, right_shift_bits;
u32 int_data = btf_type_int(t);
u16 nr_bits = BTF_INT_BITS(int_data);
u16 total_bits_offset;
u16 nr_copy_bytes;
u16 nr_copy_bits;
- u8 nr_upper_bits;
- union {
- u64 u64_num;
- u8 u8_nums[8];
- } print_num;
+ u64 print_num;
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -1008,21 +1005,20 @@ static void btf_int_bits_seq_show(const struct btf *btf,
nr_copy_bits = nr_bits + bits_offset;
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
- print_num.u64_num = 0;
- memcpy(&print_num.u64_num, data, nr_copy_bytes);
+ print_num = 0;
+ memcpy(&print_num, data, nr_copy_bytes);
- /* Ditch the higher order bits */
- nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
- if (nr_upper_bits) {
- /* We need to mask out some bits of the upper byte. */
- u8 mask = (1 << nr_upper_bits) - 1;
+#ifdef __BIG_ENDIAN_BITFIELD
+ left_shift_bits = bits_offset;
+#else
+ left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+#endif
+ right_shift_bits = BITS_PER_U64 - nr_bits;
- print_num.u8_nums[nr_copy_bytes - 1] &= mask;
- }
-
- print_num.u64_num >>= bits_offset;
+ print_num <<= left_shift_bits;
+ print_num >>= right_shift_bits;
- seq_printf(m, "0x%llx", print_num.u64_num);
+ seq_printf(m, "0x%llx", print_num);
}
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
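
Note on the btf.c change: the old code masked the upper byte of a union and then shifted right by bits_offset, which is only correct for little-endian bit layouts. The new code shifts the copied value left to drop the unwanted high bits, then right to align the field at bit 0, choosing the left-shift amount per endianness. A minimal user-space sketch of the little-endian branch, assuming BITS_PER_U64 is 64; extract_bits() is an illustrative name, not kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_U64 64

/* Pull nr_bits starting at bits_offset out of the bytes at data
 * (little-endian layout), mirroring the patched shift sequence.
 */
static uint64_t extract_bits(const void *data, uint16_t bits_offset,
                             uint16_t nr_bits)
{
        uint16_t nr_copy_bits = nr_bits + bits_offset;
        uint16_t nr_copy_bytes = (nr_copy_bits + 7) / 8; /* BITS_ROUNDUP_BYTES */
        uint64_t print_num = 0;

        memcpy(&print_num, data, nr_copy_bytes);
        print_num <<= BITS_PER_U64 - nr_copy_bits;      /* drop high bits */
        print_num >>= BITS_PER_U64 - nr_bits;           /* drop offset bits */
        return print_num;
}

int main(void)
{
        uint8_t raw[8] = { 0xb4, 0x01 };        /* bits 2..9 hold 0x6d */

        printf("0x%llx\n", (unsigned long long)extract_bits(raw, 2, 8));
        return 0;
}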
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 642c97f6d1b8..d361fc1e3bf3 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
{
struct net_device *dev = dst->dev;
struct xdp_frame *xdpf;
+ int err;
if (!dev->netdev_ops->ndo_xdp_xmit)
return -EOPNOTSUPP;
+ err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+ if (unlikely(err))
+ return err;
+
xdpf = convert_to_xdp_frame(xdp);
if (unlikely(!xdpf))
return -EOVERFLOW;
@@ -350,7 +355,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
{
int err;
- err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
+ err = xdp_ok_fwd_dev(dst->dev, skb->len);
if (unlikely(err))
return err;
skb->dev = dst->dev;
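
The devmap change makes the native XDP path perform the same forwarding checks as the generic path: dev_map_enqueue() now validates xdp->data_end - xdp->data, and dev_map_generic_redirect() switches from __xdp_generic_ok_fwd_dev() to the shared xdp_ok_fwd_dev() helper with skb->len. A rough sketch of the kind of checks such a helper performs; struct fwd_dev and the exact length arithmetic are simplified assumptions, not the kernel definition:

#include <errno.h>

#define IFF_UP 0x1      /* device is administratively up */

struct fwd_dev {
        unsigned int flags;
        unsigned int mtu;
        unsigned int hard_header_len;
};

static int xdp_ok_fwd_dev_sketch(const struct fwd_dev *fwd,
                                 unsigned int pktlen)
{
        if (!(fwd->flags & IFF_UP))
                return -ENETDOWN;       /* target device is down */
        if (pktlen > fwd->mtu + fwd->hard_header_len)
                return -EMSGSIZE;       /* frame would not fit on the wire */
        return 0;
}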
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ca2198a6d22..513d9dfcf4ee 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
* old element will be freed immediately.
* Otherwise return an error
*/
- atomic_dec(&htab->count);
- return ERR_PTR(-E2BIG);
+ l_new = ERR_PTR(-E2BIG);
+ goto dec_count;
}
l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
htab->map.numa_node);
- if (!l_new)
- return ERR_PTR(-ENOMEM);
+ if (!l_new) {
+ l_new = ERR_PTR(-ENOMEM);
+ goto dec_count;
+ }
}
memcpy(l_new->key, key, key_size);
@@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
GFP_ATOMIC | __GFP_NOWARN);
if (!pptr) {
kfree(l_new);
- return ERR_PTR(-ENOMEM);
+ l_new = ERR_PTR(-ENOMEM);
+ goto dec_count;
}
}
@@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new->hash = hash;
return l_new;
+dec_count:
+ atomic_dec(&htab->count);
+ return l_new;
}
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
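
The hashtab fix addresses a counter leak in alloc_htab_elem(): htab->count is incremented before the capacity check, but the early error returns for -E2BIG and the allocation failures skipped the matching decrement, so failed updates slowly ate the map's capacity. Routing every failure through one dec_count label pairs the increment with exactly one decrement. A user-space sketch of that unwind pattern; struct htab and alloc_elem() are illustrative stand-ins, and NULL stands in for ERR_PTR():

#include <stdatomic.h>
#include <stdlib.h>

struct htab {
        atomic_int count;
        int max_entries;
        size_t elem_size;
};

static void *alloc_elem(struct htab *htab)
{
        void *l_new;

        if (atomic_fetch_add(&htab->count, 1) >= htab->max_entries) {
                l_new = NULL;           /* stand-in for ERR_PTR(-E2BIG) */
                goto dec_count;
        }
        l_new = malloc(htab->elem_size);
        if (!l_new)                     /* stand-in for ERR_PTR(-ENOMEM) */
                goto dec_count;
        return l_new;

dec_count:
        /* undo the earlier increment on every error path */
        atomic_fetch_sub(&htab->count, 1);
        return l_new;
}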
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index cf7b6a6dbd1f..98fb7938beea 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -312,10 +312,12 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
struct smap_psock *psock;
struct sock *osk;
+ lock_sock(sk);
rcu_read_lock();
psock = smap_psock_sk(sk);
if (unlikely(!psock)) {
rcu_read_unlock();
+ release_sock(sk);
return sk->sk_prot->close(sk, timeout);
}
@@ -371,6 +373,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
e = psock_map_pop(sk, psock);
}
rcu_read_unlock();
+ release_sock(sk);
close_fun(sk, timeout);
}
@@ -568,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
while (sg[i].length) {
free += sg[i].length;
sk_mem_uncharge(sk, sg[i].length);
- put_page(sg_page(&sg[i]));
+ if (!md->skb)
+ put_page(sg_page(&sg[i]));
sg[i].length = 0;
sg[i].page_link = 0;
sg[i].offset = 0;
@@ -577,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
if (i == MAX_SKB_FRAGS)
i = 0;
}
+ if (md->skb)
+ consume_skb(md->skb);
return free;
}
@@ -1230,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
*/
TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
skb->sk = psock->sock;
- bpf_compute_data_pointers(skb);
+ bpf_compute_data_end_sk_skb(skb);
preempt_disable();
rc = (*prog->bpf_func)(skb, prog->insnsi);
preempt_enable();
@@ -1485,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
* any socket yet.
*/
skb->sk = psock->sock;
- bpf_compute_data_pointers(skb);
+ bpf_compute_data_end_sk_skb(skb);
rc = (*prog->bpf_func)(skb, prog->insnsi);
skb->sk = NULL;
rcu_read_unlock();
@@ -1896,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
if (!e) {
err = -ENOMEM;
- goto out_progs;
+ goto out_free;
}
}
@@ -2069,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map,
return -EOPNOTSUPP;
}
+ lock_sock(skops.sk);
+ preempt_disable();
+ rcu_read_lock();
err = sock_map_ctx_update_elem(&skops, map, key, flags);
+ rcu_read_unlock();
+ preempt_enable();
+ release_sock(skops.sk);
fput(socket->file);
return err;
}
@@ -2342,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
if (err)
goto err;
- /* bpf_map_update_elem() can be called in_irq() */
+ /* psock is valid here because otherwise the above *ctx_update_elem
+ * would have returned an error, so it is safe to skip the error check.
+ */
+ psock = smap_psock_sk(sock);
raw_spin_lock_bh(&b->lock);
l_old = lookup_elem_raw(head, hash, key, key_size);
if (l_old && map_flags == BPF_NOEXIST) {
@@ -2360,12 +2375,6 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
goto bucket_err;
}
- psock = smap_psock_sk(sock);
- if (unlikely(!psock)) {
- err = -EINVAL;
- goto bucket_err;
- }
-
rcu_assign_pointer(e->hash_link, l_new);
rcu_assign_pointer(e->htab,
container_of(map, struct bpf_htab, map));
@@ -2388,12 +2397,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
raw_spin_unlock_bh(&b->lock);
return 0;
bucket_err:
+ smap_release_sock(psock, sock);
raw_spin_unlock_bh(&b->lock);
err:
kfree(e);
- psock = smap_psock_sk(sock);
- if (psock)
- smap_release_sock(psock, sock);
return err;
}
@@ -2415,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ lock_sock(skops.sk);
+ preempt_disable();
+ rcu_read_lock();
err = sock_hash_ctx_update_elem(&skops, map, key, flags);
+ rcu_read_unlock();
+ preempt_enable();
+ release_sock(skops.sk);
fput(socket->file);
return err;
}
@@ -2472,10 +2485,8 @@ struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- raw_spin_lock_bh(&b->lock);
l = lookup_elem_raw(head, hash, key, key_size);
sk = l ? l->sk : NULL;
- raw_spin_unlock_bh(&b->lock);
return sk;
}
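
The sockmap changes share one theme: paths that modify a socket's psock state now run under the socket lock. bpf_tcp_close() takes lock_sock() before walking the psock, free_sg() releases skb-backed buffers through a single consume_skb() instead of per-page put_page() calls, the sk_skb programs compute their own data_end via bpf_compute_data_end_sk_skb(), the syscall-side update paths wrap the map update in lock_sock()/preempt_disable()/rcu_read_lock(), and __sock_hash_lookup_elem() drops the bucket lock because lookups already run under RCU. A user-space analogue of the nesting order on the update path, with fake_sock and the function names as illustrative stand-ins; the comments map each step to the kernel primitive it mimics:

#include <pthread.h>

struct fake_sock {
        pthread_mutex_t lock;           /* stands in for the socket lock */
};

static int ctx_update(struct fake_sock *sk)
{
        /* psock/map bookkeeping happens here, inside the RCU read side */
        return 0;
}

static int map_update(struct fake_sock *sk)
{
        int err;

        pthread_mutex_lock(&sk->lock);          /* lock_sock(sk) */
        /* preempt_disable(); rcu_read_lock(); */
        err = ctx_update(sk);
        /* rcu_read_unlock(); preempt_enable(); */
        pthread_mutex_unlock(&sk->lock);        /* release_sock(sk) */
        return err;
}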
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index d10ecd78105f..a31a1ba0f8ea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr)
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_update_elem(map, key, value, attr->flags);
goto out;
- } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+ } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
+ map->map_type == BPF_MAP_TYPE_SOCKHASH ||
+ map->map_type == BPF_MAP_TYPE_SOCKMAP) {
err = map->ops->map_update_elem(map, key, value, attr->flags);
goto out;
}
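
The syscall.c hunk exists because sock_map_update_elem() and sock_hash_update_elem() now take the socket lock and may sleep, so they can no longer run inside the generic rcu_read_lock() section of map_update_elem(); like CPUMAP, they are dispatched straight to their ops handler. A simplified sketch of that dispatch; the enum, structs, and function names are assumptions for illustration:

enum map_type { MAP_CPUMAP, MAP_SOCKMAP, MAP_SOCKHASH, MAP_HASH };

struct map;

struct map_ops {
        int (*update)(struct map *m, void *key, void *val, unsigned int flags);
};

struct map {
        enum map_type type;
        const struct map_ops *ops;
};

static int generic_update_under_rcu(struct map *m, void *key, void *val,
                                    unsigned int flags)
{
        /* rcu_read_lock()-protected fast path for ordinary map types */
        return 0;
}

static int map_update(struct map *m, void *key, void *val, unsigned int flags)
{
        if (m->type == MAP_CPUMAP || m->type == MAP_SOCKMAP ||
            m->type == MAP_SOCKHASH)
                return m->ops->update(m, key, val, flags);      /* may sleep */
        return generic_update_under_rcu(m, key, val, flags);
}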
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9e2bf834f13a..63aaac52a265 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
+ /* Upon error here we cannot fall back to the interpreter but
+ * need a hard reject of the program. Thus -EFAULT is
+ * propagated in any case.
+ */
subprog = find_subprog(env, i + insn->imm + 1);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
- return -ENOMEM;
+ goto out_undo_insn;
for (i = 0; i < env->subprog_cnt; i++) {
subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
tmp = bpf_int_jit_compile(func[i]);
if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
- err = -EFAULT;
+ err = -ENOTSUPP;
goto out_free;
}
cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
if (func[i])
bpf_jit_free(func[i]);
kfree(func);
+out_undo_insn:
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
err = jit_subprogs(env);
if (err == 0)
return 0;
+ if (err == -EFAULT)
+ return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
for (i = 0; i < prog->len; i++, insn++) {
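
The verifier change separates two failure classes in jit_subprogs(): internal inconsistencies (such as find_subprog() misses) propagate -EFAULT and hard-reject the program, while a JIT that merely cannot handle bpf-to-bpf calls returns -ENOTSUPP, letting fixup_call_args() fall back to the interpreter when one is built in. A compact sketch of that error split; both functions are illustrative stand-ins, and ENOTSUPP's value is assumed to match the kernel-internal errno:

#include <errno.h>

#define ENOTSUPP 524    /* kernel-internal errno, assumed here */

static int jit_subprogs_sketch(int jit_handles_calls, int state_ok)
{
        if (!state_ok)
                return -EFAULT;         /* verifier bug: hard reject */
        if (!jit_handles_calls)
                return -ENOTSUPP;       /* interpreter fallback allowed */
        return 0;                       /* all subprogs JITed */
}

static int fixup_call_args_sketch(int jit_handles_calls, int state_ok)
{
        int err = jit_subprogs_sketch(jit_handles_calls, state_ok);

        if (err == 0)
                return 0;
        if (err == -EFAULT)             /* never fall back on -EFAULT */
                return err;

        /* -ENOTSUPP: without CONFIG_BPF_JIT_ALWAYS_ON the program can
         * still be patched to run in the interpreter.
         */
        return 0;
}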