author     Linus Torvalds <torvalds@linux-foundation.org>    2018-05-21 08:37:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-05-21 08:37:48 -0700
commit     5aef268ace7436d8a25d41ee3837ecadb0115917 (patch)
tree       efa69470ccffa4f7655102b57118ab81de788be5 /kernel
parent     Linux 4.17-rc6 (diff)
parent     net: ip6_gre: fix tunnel metadata device sharing. (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix refcounting bug for connections in one-packet scheduling mode of IPVS, from Julian Anastasov.
 2) Set network header properly in AF_PACKET's packet_snd, from Willem de Bruijn.
 3) Fix regressions in 3c59x by converting to generic DMA API. It was relying upon the hack that the PCI DMA interfaces would accept NULL for EISA devices. From Christoph Hellwig.
 4) Remove RDMA devices before unregistering netdev in QEDE driver, from Michal Kalderon.
 5) Use after free in TUN driver ptr_ring usage, from Jason Wang.
 6) Properly check for missing netlink attributes in SMC_PNETID requests, from Eric Biggers.
 7) Set DMA mask before performing any DMA operations in vmxnet3 driver, from Regis Duchesne.
 8) Fix mlx5 build with SMP=n, from Saeed Mahameed.
 9) Classifier fixes in bcm_sf2 driver from Florian Fainelli.
10) Tuntap use after free during release, from Jason Wang.
11) Don't use stack memory in scatterlists in tls code, from Matt Mullins.
12) Not fully initialized flow key object in ipv4 routing code, from David Ahern.
13) Various packet headroom bug fixes in ip6_gre driver, from Petr Machata.
14) Remove queues from XPS maps using correct index, from Amritha Nambiar.
15) Fix use after free in sock_diag, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (64 commits)
  net: ip6_gre: fix tunnel metadata device sharing.
  cxgb4: fix offset in collecting TX rate limit info
  net: sched: red: avoid hashing NULL child
  sock_diag: fix use-after-free read in __sk_free
  sh_eth: Change platform check to CONFIG_ARCH_RENESAS
  net: dsa: Do not register devlink for unused ports
  net: Fix a bug in removing queues from XPS map
  bpf: fix truncated jump targets on heavy expansions
  bpf: parse and verdict prog attach may race with bpf map update
  bpf: sockmap update rollback on error can incorrectly dec prog refcnt
  net: test tailroom before appending to linear skb
  net: ip6_gre: Fix ip6erspan hlen calculation
  net: ip6_gre: Split up ip6gre_changelink()
  net: ip6_gre: Split up ip6gre_newlink()
  net: ip6_gre: Split up ip6gre_tnl_change()
  net: ip6_gre: Split up ip6gre_tnl_link_config()
  net: ip6_gre: Fix headroom request in ip6erspan_tunnel_xmit()
  net: ip6_gre: Request headroom in __gre6_xmit()
  selftests/bpf: check return value of fopen in test_verifier.c
  erspan: fix invalid erspan version.
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c     100
-rw-r--r--  kernel/bpf/sockmap.c   18
2 files changed, 84 insertions, 34 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba03ec39efb3..6ef6746a7871 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -218,47 +218,84 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
-static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
+ u32 curr, const bool probe_pass)
{
+ const s64 imm_min = S32_MIN, imm_max = S32_MAX;
+ s64 imm = insn->imm;
+
+ if (curr < pos && curr + imm + 1 > pos)
+ imm += delta;
+ else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
+ imm -= delta;
+ if (imm < imm_min || imm > imm_max)
+ return -ERANGE;
+ if (!probe_pass)
+ insn->imm = imm;
+ return 0;
+}
+
+static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
+ u32 curr, const bool probe_pass)
+{
+ const s32 off_min = S16_MIN, off_max = S16_MAX;
+ s32 off = insn->off;
+
+ if (curr < pos && curr + off + 1 > pos)
+ off += delta;
+ else if (curr > pos + delta && curr + off + 1 <= pos + delta)
+ off -= delta;
+ if (off < off_min || off > off_max)
+ return -ERANGE;
+ if (!probe_pass)
+ insn->off = off;
+ return 0;
+}
+
+static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
+ const bool probe_pass)
+{
+ u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
struct bpf_insn *insn = prog->insnsi;
- u32 i, insn_cnt = prog->len;
- bool pseudo_call;
- u8 code;
- int off;
+ int ret = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
+ u8 code;
+
+ /* In the probing pass we still operate on the original,
+ * unpatched image in order to check overflows before we
+ * do any other adjustments. Therefore skip the patchlet.
+ */
+ if (probe_pass && i == pos) {
+ i += delta + 1;
+ insn++;
+ }
code = insn->code;
- if (BPF_CLASS(code) != BPF_JMP)
- continue;
- if (BPF_OP(code) == BPF_EXIT)
+ if (BPF_CLASS(code) != BPF_JMP ||
+ BPF_OP(code) == BPF_EXIT)
continue;
+ /* Adjust offset of jmps if we cross patch boundaries. */
if (BPF_OP(code) == BPF_CALL) {
- if (insn->src_reg == BPF_PSEUDO_CALL)
- pseudo_call = true;
- else
+ if (insn->src_reg != BPF_PSEUDO_CALL)
continue;
+ ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
+ probe_pass);
} else {
- pseudo_call = false;
+ ret = bpf_adj_delta_to_off(insn, pos, delta, i,
+ probe_pass);
}
- off = pseudo_call ? insn->imm : insn->off;
-
- /* Adjust offset of jmps if we cross boundaries. */
- if (i < pos && i + off + 1 > pos)
- off += delta;
- else if (i > pos + delta && i + off + 1 <= pos + delta)
- off -= delta;
-
- if (pseudo_call)
- insn->imm = off;
- else
- insn->off = off;
+ if (ret)
+ break;
}
+
+ return ret;
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len)
{
u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+ const u32 cnt_max = S16_MAX;
struct bpf_prog *prog_adj;
/* Since our patchlet doesn't expand the image, we're done. */
@@ -269,6 +306,15 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
insn_adj_cnt = prog->len + insn_delta;
+ /* Reject anything that would potentially let the insn->off
+ * target overflow when we have excessive program expansions.
+ * We need to probe here before we do any reallocation where
+ * we afterwards may not fail anymore.
+ */
+ if (insn_adj_cnt > cnt_max &&
+ bpf_adj_branches(prog, off, insn_delta, true))
+ return NULL;
+
/* Several new instructions need to be inserted. Make room
* for them. Likely, there's no need for a new allocation as
* last page could have large enough tailroom.
@@ -294,7 +340,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
sizeof(*patch) * insn_rest);
memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
- bpf_adj_branches(prog_adj, off, insn_delta);
+ /* We are guaranteed to not fail at this point, otherwise
+ * the ship has sailed to reverse to the original state. An
+ * overflow cannot happen at this point.
+ */
+ BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
return prog_adj;
}
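
The bpf/core.c change above splits branch fixup into a probe pass that only range-checks the adjusted jump targets against the 16-bit/32-bit limits on the original, unpatched image, and a real pass that rewrites them once the reallocation can no longer fail. A minimal standalone sketch of that probe-then-patch idea is below; toy_insn, adjust_branches() and the limits used here are illustrative stand-ins, not the kernel's data structures or API, and only the simple "jump from before the patch site over it" case is modelled.

/* Sketch only: probe-then-patch branch adjustment with an overflow check.
 * Names and the instruction format are illustrative, not kernel code.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_insn {
	long off;	/* relative jump target; must fit a 16-bit field */
	bool is_jmp;
};

/* Walk the image once. In probe mode only range-check the adjusted
 * offsets; in patch mode actually rewrite them. Returns 0 on success,
 * -1 if any adjusted offset would overflow the 16-bit field.
 */
static int adjust_branches(struct toy_insn *insns, unsigned int cnt,
			   unsigned int pos, unsigned int delta, bool probe)
{
	for (unsigned int i = 0; i < cnt; i++) {
		long off = insns[i].off;

		if (!insns[i].is_jmp)
			continue;
		/* A jump from before the patch site that lands at or
		 * beyond it has to grow by the size of the patchlet.
		 */
		if (i < pos && (long)i + off + 1 > (long)pos)
			off += delta;
		if (off < SHRT_MIN || off > SHRT_MAX)
			return -1;	/* would truncate: reject the patch */
		if (!probe)
			insns[i].off = off;
	}
	return 0;
}

int main(void)
{
	/* insn 0 jumps over insns 1..2; a patchlet is inserted at pos 1. */
	struct toy_insn prog[4] = { { .off = 2, .is_jmp = true } };
	unsigned int pos = 1, delta = 40000;	/* deliberately oversized */

	/* Probe on the unpatched image first; patch only if it cannot overflow. */
	if (adjust_branches(prog, 4, pos, delta, true))
		printf("patch rejected: adjusted jump target would not fit in 16 bits\n");
	else
		adjust_branches(prog, 4, pos, delta, false);
	return 0;
}

The only point mirrored here is the ordering: the range check runs before anything is mutated or reallocated, so a rejected expansion leaves the image untouched, which is what lets the later, real pass assert that it cannot fail.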
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 098eca568c2b..95a84b2f10ce 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1703,11 +1703,11 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
* we increment the refcnt. If this is the case abort with an
* error.
*/
- verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
+ verdict = bpf_prog_inc_not_zero(verdict);
if (IS_ERR(verdict))
return PTR_ERR(verdict);
- parse = bpf_prog_inc_not_zero(stab->bpf_parse);
+ parse = bpf_prog_inc_not_zero(parse);
if (IS_ERR(parse)) {
bpf_prog_put(verdict);
return PTR_ERR(parse);
@@ -1715,12 +1715,12 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
}
if (tx_msg) {
- tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg);
+ tx_msg = bpf_prog_inc_not_zero(tx_msg);
if (IS_ERR(tx_msg)) {
- if (verdict)
- bpf_prog_put(verdict);
- if (parse)
+ if (parse && verdict) {
bpf_prog_put(parse);
+ bpf_prog_put(verdict);
+ }
return PTR_ERR(tx_msg);
}
}
@@ -1805,10 +1805,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
out_free:
smap_release_sock(psock, sock);
out_progs:
- if (verdict)
- bpf_prog_put(verdict);
- if (parse)
+ if (parse && verdict) {
bpf_prog_put(parse);
+ bpf_prog_put(verdict);
+ }
if (tx_msg)
bpf_prog_put(tx_msg);
write_unlock_bh(&sock->sk_callback_lock);
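
The sockmap.c hunks take references on the program pointers that were actually read (verdict, parse, tx_msg) and make the error paths drop exactly the references that were taken, together or not at all. A rough standalone sketch of that take-all-or-roll-back refcount pattern is below; struct prog, try_get() and put() are illustrative stand-ins, not the kernel's bpf_prog_inc_not_zero()/bpf_prog_put() API.

/* Sketch only: grab every reference or roll back what was taken,
 * loosely modelled on the error handling in the fix above.
 */
#include <stdatomic.h>
#include <stdio.h>

struct prog {
	atomic_int refcnt;
};

/* Take a reference only if the object is still live (refcnt > 0). */
static int try_get(struct prog *p)
{
	int old = atomic_load(&p->refcnt);

	do {
		if (old == 0)
			return -1;	/* already dying, do not resurrect */
	} while (!atomic_compare_exchange_weak(&p->refcnt, &old, old + 1));
	return 0;
}

static void put(struct prog *p)
{
	atomic_fetch_sub(&p->refcnt, 1);
}

/* Either all references are held on success, or none on failure. */
static int attach_all(struct prog *verdict, struct prog *parse, struct prog *tx_msg)
{
	if (try_get(verdict))
		return -1;
	if (try_get(parse)) {
		put(verdict);
		return -1;
	}
	if (tx_msg && try_get(tx_msg)) {
		put(parse);
		put(verdict);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct prog v = { 1 }, p = { 1 }, t = { 0 };	/* tx_msg already released */

	if (attach_all(&v, &p, &t))
		printf("attach failed, refcounts rolled back: v=%d p=%d\n",
		       atomic_load(&v.refcnt), atomic_load(&p.refcnt));
	return 0;
}

As in the fix, a failure on a later reference releases every earlier one, so no path leaves a stray reference behind or drops one it never took.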