Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c         101
-rw-r--r--  net/ipv4/af_inet.c         20
-rw-r--r--  net/ipv6/af_inet6.c        22
-rw-r--r--  net/ipv6/ip6_fib.c         60
-rw-r--r--  net/ipv6/route.c           42
-rw-r--r--  net/netlink/af_netlink.c   92
-rw-r--r--  net/xdp/xdp_umem.c         21
-rw-r--r--  net/xdp/xsk.c               8
-rw-r--r--  net/xdp/xsk_queue.c         4
-rw-r--r--  net/xdp/xsk_queue.h         8
10 files changed, 308 insertions(+), 70 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index dfaf5df13722..5815902bb617 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4003,16 +4003,22 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
};
#ifdef CONFIG_SOCK_CGROUP_DATA
+static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
+{
+ struct cgroup *cgrp;
+
+ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ return cgroup_id(cgrp);
+}
+
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{
struct sock *sk = skb_to_full_sk(skb);
- struct cgroup *cgrp;
if (!sk || !sk_fullsock(sk))
return 0;
- cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return cgroup_id(cgrp);
+ return __bpf_sk_cgroup_id(sk);
}
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
@@ -4022,16 +4028,12 @@ static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
- ancestor_level)
+static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
+ int ancestor_level)
{
- struct sock *sk = skb_to_full_sk(skb);
struct cgroup *ancestor;
struct cgroup *cgrp;
- if (!sk || !sk_fullsock(sk))
- return 0;
-
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
ancestor = cgroup_ancestor(cgrp, ancestor_level);
if (!ancestor)
@@ -4040,6 +4042,17 @@ BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
return cgroup_id(ancestor);
}
+BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
+ ancestor_level)
+{
+ struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk || !sk_fullsock(sk))
+ return 0;
+
+ return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
+}
+
static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
.func = bpf_skb_ancestor_cgroup_id,
.gpl_only = false,
@@ -4047,6 +4060,31 @@ static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
+
+BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
+{
+ return __bpf_sk_cgroup_id(sk);
+}
+
+static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
+ .func = bpf_sk_cgroup_id,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_SOCKET,
+};
+
+BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
+{
+ return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
+}
+
+static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
+ .func = bpf_sk_ancestor_cgroup_id,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_SOCKET,
+ .arg2_type = ARG_ANYTHING,
+};
#endif
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
@@ -4525,30 +4563,28 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
{
#ifdef CONFIG_INET
struct sock *sk = ctx->sk;
+ u32 flags = BIND_FROM_BPF;
int err;
- /* Binding to port can be expensive so it's prohibited in the helper.
- * Only binding to IP is supported.
- */
err = -EINVAL;
if (addr_len < offsetofend(struct sockaddr, sa_family))
return err;
if (addr->sa_family == AF_INET) {
if (addr_len < sizeof(struct sockaddr_in))
return err;
- if (((struct sockaddr_in *)addr)->sin_port != htons(0))
- return err;
- return __inet_bind(sk, addr, addr_len, true, false);
+ if (((struct sockaddr_in *)addr)->sin_port == htons(0))
+ flags |= BIND_FORCE_ADDRESS_NO_PORT;
+ return __inet_bind(sk, addr, addr_len, flags);
#if IS_ENABLED(CONFIG_IPV6)
} else if (addr->sa_family == AF_INET6) {
if (addr_len < SIN6_LEN_RFC2133)
return err;
- if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
- return err;
+ if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0))
+ flags |= BIND_FORCE_ADDRESS_NO_PORT;
/* ipv6_bpf_stub cannot be NULL, since it's called from
* bpf_cgroup_inet6_connect hook and ipv6 is already loaded
*/
- return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
+ return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
#endif /* CONFIG_IPV6 */
}
#endif /* CONFIG_INET */
@@ -6159,8 +6195,22 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#ifdef CONFIG_SOCK_CGROUP_DATA
case BPF_FUNC_skb_cgroup_id:
return &bpf_skb_cgroup_id_proto;
+ case BPF_FUNC_skb_ancestor_cgroup_id:
+ return &bpf_skb_ancestor_cgroup_id_proto;
+ case BPF_FUNC_sk_cgroup_id:
+ return &bpf_sk_cgroup_id_proto;
+ case BPF_FUNC_sk_ancestor_cgroup_id:
+ return &bpf_sk_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+ case BPF_FUNC_skc_lookup_tcp:
+ return &bpf_skc_lookup_tcp_proto;
case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto;
case BPF_FUNC_get_listener_sock:
@@ -7031,6 +7081,7 @@ static bool sock_addr_is_valid_access(int off, int size,
case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
msg_src_ip6[3]):
+ case bpf_ctx_range(struct bpf_sock_addr, user_port):
if (type == BPF_READ) {
bpf_ctx_record_field_size(info, size_default);
@@ -7061,10 +7112,6 @@ static bool sock_addr_is_valid_access(int off, int size,
return false;
}
break;
- case bpf_ctx_range(struct bpf_sock_addr, user_port):
- if (size != size_default)
- return false;
- break;
case offsetof(struct bpf_sock_addr, sk):
if (type != BPF_READ)
return false;
@@ -7960,8 +8007,8 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{
+ int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
struct bpf_insn *insn = insn_buf;
- int off;
switch (si->off) {
case offsetof(struct bpf_sock_addr, user_family):
@@ -7996,9 +8043,11 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
offsetof(struct sockaddr_in6, sin6_port));
BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
sizeof_field(struct sockaddr_in6, sin6_port));
- SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
- struct sockaddr_in6, uaddr,
- sin6_port, tmp_reg);
+ /* Account for sin6_port being smaller than user_port. */
+ port_size = min(port_size, BPF_LDST_BYTES(si));
+ SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
+ struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
+ sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg);
break;
case offsetof(struct bpf_sock_addr, family):
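
A minimal sketch (not part of this diff) of how a cgroup/skb program might use the bpf_sk_cgroup_id()/bpf_sk_ancestor_cgroup_id() helpers that cg_skb_func_proto() now exposes; the section name, ancestor level and verdict logic are illustrative assumptions, and the helper declarations come from libbpf's bpf_helpers.h:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int check_cgroup(struct __sk_buff *skb)
{
	struct bpf_sock *sk;
	__u64 cgid, ancestor_cgid;

	sk = skb->sk;
	if (!sk)
		return 1;			/* allow: no socket attached */
	sk = bpf_sk_fullsock(sk);		/* helpers take a full socket */
	if (!sk)
		return 1;

	cgid = bpf_sk_cgroup_id(sk);
	ancestor_cgid = bpf_sk_ancestor_cgroup_id(sk, 2 /* illustrative level */);
	/* ...compare cgid/ancestor_cgid against an expected id, e.g. from a map... */
	return 1;				/* allow */
}

char _license[] SEC("license") = "GPL";
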
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6177c4ba0037..fcf0d12a407a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -450,12 +450,12 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (err)
return err;
- return __inet_bind(sk, uaddr, addr_len, false, true);
+ return __inet_bind(sk, uaddr, addr_len, BIND_WITH_LOCK);
}
EXPORT_SYMBOL(inet_bind);
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock)
+ u32 flags)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -506,7 +506,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
* would be illegal to use them (multicast/broadcast) in
* which case the sending device address is used.
*/
- if (with_lock)
+ if (flags & BIND_WITH_LOCK)
lock_sock(sk);
/* Check these errors (active socket, double bind). */
@@ -520,16 +520,18 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
/* Make sure we are allowed to bind here. */
if (snum || !(inet->bind_address_no_port ||
- force_bind_address_no_port)) {
+ (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
if (sk->sk_prot->get_port(sk, snum)) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
}
- err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
- if (err) {
- inet->inet_saddr = inet->inet_rcv_saddr = 0;
- goto out_release_sock;
+ if (!(flags & BIND_FROM_BPF)) {
+ err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
+ if (err) {
+ inet->inet_saddr = inet->inet_rcv_saddr = 0;
+ goto out_release_sock;
+ }
}
}
@@ -543,7 +545,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk_dst_reset(sk);
err = 0;
out_release_sock:
- if (with_lock)
+ if (flags & BIND_WITH_LOCK)
release_sock(sk);
out:
return err;
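
The BIND_* bits consumed above are defined outside this diff (in the series' header changes); the sketch below shows the assumed shape, with the exact bit values being a guess:

/* assumed definitions, e.g. in include/net/inet_common.h */
#define BIND_FORCE_ADDRESS_NO_PORT	(1 << 0)
#define BIND_WITH_LOCK			(1 << 1)
#define BIND_FROM_BPF			(1 << 2)

With this, the regular bind() path passes only BIND_WITH_LOCK, while bpf_bind() passes BIND_FROM_BPF and adds BIND_FORCE_ADDRESS_NO_PORT when the requested port is zero, so the post-bind cgroup hook is not re-run for BPF-initiated binds.
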
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 345baa0a754f..771a462a8322 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -273,7 +273,7 @@ out_rcu_unlock:
}
static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock)
+ u32 flags)
{
struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -297,7 +297,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
- if (with_lock)
+ if (flags & BIND_WITH_LOCK)
lock_sock(sk);
/* Check these errors (active socket, double bind). */
@@ -400,18 +400,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
/* Make sure we are allowed to bind here. */
if (snum || !(inet->bind_address_no_port ||
- force_bind_address_no_port)) {
+ (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
if (sk->sk_prot->get_port(sk, snum)) {
sk->sk_ipv6only = saved_ipv6only;
inet_reset_saddr(sk);
err = -EADDRINUSE;
goto out;
}
- err = BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk);
- if (err) {
- sk->sk_ipv6only = saved_ipv6only;
- inet_reset_saddr(sk);
- goto out;
+ if (!(flags & BIND_FROM_BPF)) {
+ err = BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk);
+ if (err) {
+ sk->sk_ipv6only = saved_ipv6only;
+ inet_reset_saddr(sk);
+ goto out;
+ }
}
}
@@ -423,7 +425,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
inet->inet_dport = 0;
inet->inet_daddr = 0;
out:
- if (with_lock)
+ if (flags & BIND_WITH_LOCK)
release_sock(sk);
return err;
out_unlock:
@@ -451,7 +453,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (err)
return err;
- return __inet6_bind(sk, uaddr, addr_len, false, true);
+ return __inet6_bind(sk, uaddr, addr_len, BIND_WITH_LOCK);
}
EXPORT_SYMBOL(inet6_bind);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 46ed56719476..250ff52c674e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -2467,7 +2467,7 @@ void fib6_gc_cleanup(void)
}
#ifdef CONFIG_PROC_FS
-static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
{
struct fib6_info *rt = v;
struct ipv6_route_iter *iter = seq->private;
@@ -2625,7 +2625,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
return w->node && !(w->state == FWS_U && w->node == w->root);
}
-static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
__releases(RCU_BH)
{
struct net *net = seq_file_net(seq);
@@ -2637,6 +2637,62 @@ static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
rcu_read_unlock_bh();
}
+#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
+static int ipv6_route_prog_seq_show(struct bpf_prog *prog,
+ struct bpf_iter_meta *meta,
+ void *v)
+{
+ struct bpf_iter__ipv6_route ctx;
+
+ ctx.meta = meta;
+ ctx.rt = v;
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+ struct ipv6_route_iter *iter = seq->private;
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+ int ret;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, false);
+ if (!prog)
+ return ipv6_route_native_seq_show(seq, v);
+
+ ret = ipv6_route_prog_seq_show(prog, &meta, v);
+ iter->w.leaf = NULL;
+
+ return ret;
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ if (!v) {
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, true);
+ if (prog)
+ (void)ipv6_route_prog_seq_show(prog, &meta, v);
+ }
+
+ ipv6_route_native_seq_stop(seq, v);
+}
+#else
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+ return ipv6_route_native_seq_show(seq, v);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+{
+ ipv6_route_native_seq_stop(seq, v);
+}
+#endif
+
const struct seq_operations ipv6_route_seq_ops = {
.start = ipv6_route_seq_start,
.next = ipv6_route_seq_next,
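
A minimal sketch (not part of this diff) of a BPF iterator program that could attach to the "ipv6_route" target wired up above; it relies on vmlinux.h (BTF) types, and the printed field is illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("iter/ipv6_route")
int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx)
{
	static const char fmt[] = "metric %u\n";
	struct seq_file *seq = ctx->meta->seq;
	struct fib6_info *rt = ctx->rt;
	__u64 metric;

	if (!rt)	/* NULL on the post-iteration call (PTR_TO_BTF_ID_OR_NULL) */
		return 0;

	metric = rt->fib6_metric;
	bpf_seq_printf(seq, fmt, sizeof(fmt), &metric, sizeof(metric));
	return 0;
}
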
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4292653af533..fcf0d5c87d09 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -6421,6 +6421,35 @@ void __init ip6_route_init_special_entries(void)
#endif
}
+#if IS_BUILTIN(CONFIG_IPV6)
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
+
+static const struct bpf_iter_reg ipv6_route_reg_info = {
+ .target = "ipv6_route",
+ .seq_ops = &ipv6_route_seq_ops,
+ .init_seq_private = bpf_iter_init_seq_net,
+ .fini_seq_private = bpf_iter_fini_seq_net,
+ .seq_priv_size = sizeof(struct ipv6_route_iter),
+ .ctx_arg_info_size = 1,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__ipv6_route, rt),
+ PTR_TO_BTF_ID_OR_NULL },
+ },
+};
+
+static int __init bpf_iter_register(void)
+{
+ return bpf_iter_reg_target(&ipv6_route_reg_info);
+}
+
+static void bpf_iter_unregister(void)
+{
+ bpf_iter_unreg_target(&ipv6_route_reg_info);
+}
+#endif
+#endif
+
int __init ip6_route_init(void)
{
int ret;
@@ -6483,6 +6512,14 @@ int __init ip6_route_init(void)
if (ret)
goto out_register_late_subsys;
+#if IS_BUILTIN(CONFIG_IPV6)
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+ ret = bpf_iter_register();
+ if (ret)
+ goto out_register_late_subsys;
+#endif
+#endif
+
for_each_possible_cpu(cpu) {
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
@@ -6515,6 +6552,11 @@ out_kmem_cache:
void ip6_route_cleanup(void)
{
+#if IS_BUILTIN(CONFIG_IPV6)
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+ bpf_iter_unregister();
+#endif
+#endif
unregister_netdevice_notifier(&ip6_route_dev_notifier);
unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_cleanup();
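
A hedged userspace sketch (not part of this diff) of consuming an iterator registered this way with libbpf; the program handle would come from a skeleton built around the iterator program sketched after the ip6_fib.c hunks, and libbpf >= 1.0 error conventions are assumed:

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int dump_iter(struct bpf_program *prog)	/* e.g. skel->progs.dump_ipv6_route */
{
	struct bpf_link *link;
	char buf[4096];
	ssize_t n;
	int iter_fd;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -1;
	}

	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);	/* seq_file output produced by the BPF program */

	close(iter_fd);
	bpf_link__destroy(link);
	return 0;
}

Alternatively, pinning the link in bpffs (bpf_link__pin()) leaves a file whose reads drive the same iteration.
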
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5ded01ca8b20..4f2c3b14ddbf 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2596,7 +2596,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return __netlink_seq_next(seq);
}
-static void netlink_seq_stop(struct seq_file *seq, void *v)
+static void netlink_native_seq_stop(struct seq_file *seq, void *v)
{
struct nl_seq_iter *iter = seq->private;
@@ -2607,7 +2607,7 @@ static void netlink_seq_stop(struct seq_file *seq, void *v)
}
-static int netlink_seq_show(struct seq_file *seq, void *v)
+static int netlink_native_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -2634,6 +2634,68 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
return 0;
}
+#ifdef CONFIG_BPF_SYSCALL
+struct bpf_iter__netlink {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct netlink_sock *, sk);
+};
+
+DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)
+
+static int netlink_prog_seq_show(struct bpf_prog *prog,
+ struct bpf_iter_meta *meta,
+ void *v)
+{
+ struct bpf_iter__netlink ctx;
+
+ meta->seq_num--; /* skip SEQ_START_TOKEN */
+ ctx.meta = meta;
+ ctx.sk = nlk_sk((struct sock *)v);
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int netlink_seq_show(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, false);
+ if (!prog)
+ return netlink_native_seq_show(seq, v);
+
+ if (v != SEQ_START_TOKEN)
+ return netlink_prog_seq_show(prog, &meta, v);
+
+ return 0;
+}
+
+static void netlink_seq_stop(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ if (!v) {
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, true);
+ if (prog)
+ (void)netlink_prog_seq_show(prog, &meta, v);
+ }
+
+ netlink_native_seq_stop(seq, v);
+}
+#else
+static int netlink_seq_show(struct seq_file *seq, void *v)
+{
+ return netlink_native_seq_show(seq, v);
+}
+
+static void netlink_seq_stop(struct seq_file *seq, void *v)
+{
+ netlink_native_seq_stop(seq, v);
+}
+#endif
+
static const struct seq_operations netlink_seq_ops = {
.start = netlink_seq_start,
.next = netlink_seq_next,
@@ -2740,6 +2802,26 @@ static const struct rhashtable_params netlink_rhashtable_params = {
.automatic_shrinking = true,
};
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+static const struct bpf_iter_reg netlink_reg_info = {
+ .target = "netlink",
+ .seq_ops = &netlink_seq_ops,
+ .init_seq_private = bpf_iter_init_seq_net,
+ .fini_seq_private = bpf_iter_fini_seq_net,
+ .seq_priv_size = sizeof(struct nl_seq_iter),
+ .ctx_arg_info_size = 1,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__netlink, sk),
+ PTR_TO_BTF_ID_OR_NULL },
+ },
+};
+
+static int __init bpf_iter_register(void)
+{
+ return bpf_iter_reg_target(&netlink_reg_info);
+}
+#endif
+
static int __init netlink_proto_init(void)
{
int i;
@@ -2748,6 +2830,12 @@ static int __init netlink_proto_init(void)
if (err != 0)
goto out;
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+ err = bpf_iter_register();
+ if (err)
+ goto out;
+#endif
+
BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
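
A minimal sketch (not part of this diff) of a BPF iterator program for the "netlink" target registered above; as the kernel side shows, the program is never invoked for SEQ_START_TOKEN, only for real sockets and the final NULL pass, and the vmlinux.h types and printed fields are illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("iter/netlink")
int dump_netlink(struct bpf_iter__netlink *ctx)
{
	static const char fmt[] = "portid %u dst_portid %u\n";
	struct seq_file *seq = ctx->meta->seq;
	struct netlink_sock *nlk = ctx->sk;
	__u64 args[2];

	if (!nlk)	/* NULL on the post-iteration call */
		return 0;

	args[0] = nlk->portid;
	args[1] = nlk->dst_portid;
	bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}
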
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index ed7a6060f73c..37ace3bc0d48 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -30,9 +30,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
if (!xs->tx)
return;
- spin_lock_irqsave(&umem->xsk_list_lock, flags);
- list_add_rcu(&xs->list, &umem->xsk_list);
- spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+ spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
+ list_add_rcu(&xs->list, &umem->xsk_tx_list);
+ spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
@@ -42,9 +42,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
if (!xs->tx)
return;
- spin_lock_irqsave(&umem->xsk_list_lock, flags);
+ spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
list_del_rcu(&xs->list);
- spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+ spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}
/* The umem is stored both in the _rx struct and the _tx struct as we do
@@ -279,7 +279,7 @@ void xdp_put_umem(struct xdp_umem *umem)
}
}
-static int xdp_umem_pin_pages(struct xdp_umem *umem)
+static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
unsigned int gup_flags = FOLL_WRITE;
long npgs;
@@ -291,7 +291,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
return -ENOMEM;
down_read(&current->mm->mmap_sem);
- npgs = pin_user_pages(umem->address, umem->npgs,
+ npgs = pin_user_pages(address, umem->npgs,
gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
up_read(&current->mm->mmap_sem);
@@ -385,7 +385,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
return -EINVAL;
- umem->address = (unsigned long)addr;
umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
: ~((u64)chunk_size - 1);
umem->size = size;
@@ -395,8 +394,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
- INIT_LIST_HEAD(&umem->xsk_list);
- spin_lock_init(&umem->xsk_list_lock);
+ INIT_LIST_HEAD(&umem->xsk_tx_list);
+ spin_lock_init(&umem->xsk_tx_list_lock);
refcount_set(&umem->users, 1);
@@ -404,7 +403,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (err)
return err;
- err = xdp_umem_pin_pages(umem);
+ err = xdp_umem_pin_pages(umem, (unsigned long)addr);
if (err)
goto out_account;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f6e6609f70a3..45ffd67b367d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -75,7 +75,7 @@ void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
return;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+ list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
}
rcu_read_unlock();
@@ -102,7 +102,7 @@ void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
return;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+ list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
}
rcu_read_unlock();
@@ -305,7 +305,7 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+ list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
__xskq_cons_release(xs->tx);
xs->sk.sk_write_space(&xs->sk);
}
@@ -318,7 +318,7 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+ list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
if (!xskq_cons_peek_desc(xs->tx, desc, umem))
continue;
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index c90e9c1e3c63..57fb81bd593c 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -9,12 +9,12 @@
#include "xsk_queue.h"
-void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
+void xskq_set_umem(struct xsk_queue *q, u64 umem_size, u64 chunk_mask)
{
if (!q)
return;
- q->size = size;
+ q->umem_size = umem_size;
q->chunk_mask = chunk_mask;
}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index b50bb5c76da5..648733ec24ac 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -30,7 +30,7 @@ struct xdp_umem_ring {
struct xsk_queue {
u64 chunk_mask;
- u64 size;
+ u64 umem_size;
u32 ring_mask;
u32 nentries;
u32 cached_prod;
@@ -123,7 +123,7 @@ static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
u64 base_addr = xsk_umem_extract_addr(addr);
addr = xsk_umem_add_offset_to_addr(addr);
- if (base_addr >= q->size || addr >= q->size ||
+ if (base_addr >= q->umem_size || addr >= q->umem_size ||
xskq_cons_crosses_non_contig_pg(umem, addr, length)) {
q->invalid_descs++;
return false;
@@ -134,7 +134,7 @@ static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
static inline bool xskq_cons_is_valid_addr(struct xsk_queue *q, u64 addr)
{
- if (addr >= q->size) {
+ if (addr >= q->umem_size) {
q->invalid_descs++;
return false;
}
@@ -379,7 +379,7 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
return q ? q->invalid_descs : 0;
}
-void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
+void xskq_set_umem(struct xsk_queue *q, u64 umem_size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);