author     Vlad Buslov <vladbu@mellanox.com>        2018-08-10 20:51:41 +0300
committer  David S. Miller <davem@davemloft.net>    2018-08-11 12:37:09 -0700
commit     2142236b45843dbcbe9691d24cf06caff91a78fd (patch)
tree       5427b32b398de532642c59b032961b7b99233c12 /net/sched
parent     Merge branch 'net-sctp-Avoid-allocating-high-order-memory-with-kmalloc' (diff)
net: sched: act_bpf: remove dependency on rtnl lock
Use tcf spinlock to protect bpf action private data from concurrent
modification during dump and init. Remove rtnl lock assertion that is
no longer necessary.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
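The idea is easy to see outside the kernel. Below is a minimal userspace
sketch of the pattern the patch applies, assuming POSIX threads: a
per-object spinlock stands in for prog->tcf_lock, a snapshotting reader
stands in for dump, and a writer stands in for init. Every name and type
in the sketch is an illustrative stand-in, not the kernel API, and the
real dump holds the lock across netlink serialization rather than doing
a plain field copy.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for the action's private data. */
    struct act_params {
            int action;
            int index;
    };

    struct act {
            pthread_spinlock_t lock;  /* plays the role of prog->tcf_lock */
            struct act_params p;      /* the fields the lock protects */
    };

    /* Dump side: snapshot the parameters under the lock so a concurrent
     * update can never be observed half-applied. */
    static void act_dump(struct act *a, struct act_params *snap)
    {
            pthread_spin_lock(&a->lock);
            *snap = a->p;
            pthread_spin_unlock(&a->lock);
    }

    /* Init side: publish new parameters atomically w.r.t. dumpers,
     * replacing what the global rtnl lock used to guarantee. */
    static void act_update(struct act *a, int action)
    {
            pthread_spin_lock(&a->lock);
            a->p.action = action;
            pthread_spin_unlock(&a->lock);
    }

    int main(void)
    {
            struct act a = { .p = { .action = 0, .index = 1 } };
            struct act_params snap;

            pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
            act_update(&a, 2);
            act_dump(&a, &snap);
            printf("action=%d index=%d\n", snap.action, snap.index);
            pthread_spin_destroy(&a.lock);
            return 0;
    }

It compiles with gcc -pthread and prints action=2 index=1. The point of
the per-object lock is scope: readers and writers of one action only
contend with each other, not with every other rtnl user.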
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_bpf.c  |  10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 6203eb075c9a..9e8a33f9fee3 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -143,11 +143,12 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
 		.index   = prog->tcf_index,
 		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
 		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
-		.action  = prog->tcf_action,
 	};
 	struct tcf_t tm;
 	int ret;
 
+	spin_lock(&prog->tcf_lock);
+	opt.action = prog->tcf_action;
 	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
@@ -163,9 +164,11 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
 			  TCA_ACT_BPF_PAD))
 		goto nla_put_failure;
 
+	spin_unlock(&prog->tcf_lock);
 	return skb->len;
 
 nla_put_failure:
+	spin_unlock(&prog->tcf_lock);
 	nlmsg_trim(skb, tp);
 	return -1;
 }
@@ -264,7 +267,7 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
 {
 	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
 	/* updates to prog->filter are prevented, since it's called either
-	 * with rtnl lock or during final cleanup in rcu callback
+	 * with tcf lock or during final cleanup in rcu callback
 	 */
 	cfg->filter = rcu_dereference_protected(prog->filter, 1);
 
@@ -336,8 +339,8 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 		goto out;
 
 	prog = to_bpf(*act);
-	ASSERT_RTNL();
 
+	spin_lock(&prog->tcf_lock);
 	if (res != ACT_P_CREATED)
 		tcf_bpf_prog_fill_cfg(prog, &old);
 
@@ -349,6 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
 	prog->tcf_action = parm->action;
 	rcu_assign_pointer(prog->filter, cfg.filter);
+	spin_unlock(&prog->tcf_lock);
 
 	if (res == ACT_P_CREATED) {
 		tcf_idr_insert(tn, *act);
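One detail worth noting in the dump hunks above: once tcf_lock is taken,
it must be released on both the success path and the nla_put_failure
path, which is why the unlock appears twice. Below is a small
compile-checkable sketch of that goto unwinding discipline; put_attr is
a hypothetical stand-in invented here, not nla_put or any kernel helper.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t lock;
    static int action = 2;

    /* Hypothetical stand-in for attribute serialization: a nonzero
     * return means it did not fit and the caller must unwind. */
    static int put_attr(int value, int force_fail)
    {
            (void)value;
            return force_fail;
    }

    static int dump(int force_fail)
    {
            pthread_spin_lock(&lock);
            if (put_attr(action, force_fail))
                    goto put_failure;
            pthread_spin_unlock(&lock);  /* success path drops the lock... */
            return 0;

    put_failure:
            pthread_spin_unlock(&lock);  /* ...and so does the error path */
            return -1;                   /* after trimming the message */
    }

    int main(void)
    {
            pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
            printf("ok=%d err=%d\n", dump(0), dump(1));
            pthread_spin_destroy(&lock);
            return 0;
    }

Missing either unlock would leave the spinlock held on one of the two
exits, deadlocking the next dump or init of the same action.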