path: root/net/sched/cls_flower.c
author    Vlad Buslov <vladbu@mellanox.com>    2019-03-21 15:17:34 +0200
committer David S. Miller <davem@davemloft.net>    2019-03-21 14:32:17 -0700
commit    620da4860827e1d0ddc7b941d43920c19314de0e (patch)
tree      4ca9abdf57ceb33140b15af79270320c8ab24482 /net/sched/cls_flower.c
parent    net: sched: flower: don't check for rtnl on head dereference (diff)
net: sched: flower: refactor fl_change
In preparation for using the classifier spinlock instead of relying on the external rtnl lock, rearrange the code in fl_change(). The goal is to group the code that changes classifier state into a single block, so that following commits in this set can protect it from parallel modification with tp->lock. The data structures that require tp->lock protection are the mask hashtable, the filters list, and the classifier handle_idr.

fl_hw_replace_filter() is a sleeping function and cannot be called while holding a spinlock. In order to execute the whole sequence of changes to shared classifier data structures atomically, call fl_hw_replace_filter() before modifying them.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
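To make the ordering described in the commit message concrete, here is a minimal, self-contained user-space C sketch of the pattern this refactoring prepares for: perform the work that may sleep before taking the spinlock, and keep only the updates to shared structures inside the critical section. It is not kernel code; the names hw_replace(), filter_table and table_lock are hypothetical stand-ins for fl_hw_replace_filter(), the flower filter structures, and tp->lock.

/*
 * Illustrative user-space sketch only -- not the kernel implementation.
 * hw_replace(), filter_table and table_lock are hypothetical stand-ins.
 * Build with: cc -o demo demo.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct filter {
	int handle;
	struct filter *next;
};

static pthread_spinlock_t table_lock;	/* stands in for tp->lock */
static struct filter *filter_table;	/* stands in for the filters list */

/* May block (e.g. waits for hardware/firmware) -- must not hold a spinlock. */
static int hw_replace(struct filter *f)
{
	(void)f;
	usleep(1000);			/* simulate a sleeping offload request */
	return 0;
}

static int change_filter(struct filter *fnew)
{
	int err;

	/* Step 1: the sleeping work is done with no spinlock held. */
	err = hw_replace(fnew);
	if (err)
		return err;

	/* Step 2: shared-state updates are grouped in one short critical section. */
	pthread_spin_lock(&table_lock);
	fnew->next = filter_table;
	filter_table = fnew;
	pthread_spin_unlock(&table_lock);

	return 0;
}

int main(void)
{
	struct filter f = { .handle = 1 };

	pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
	printf("change_filter: %d\n", change_filter(&f));
	pthread_spin_destroy(&table_lock);
	return 0;
}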
Diffstat (limited to 'net/sched/cls_flower.c')
-rw-r--r--  net/sched/cls_flower.c | 80
1 file changed, 41 insertions(+), 39 deletions(-)
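Two details in the diff below are worth noting. First, the idr_alloc_u32() calls move from GFP_KERNEL to GFP_ATOMIC, since later patches in the series run this block under tp->lock, where an allocation that may sleep is not allowed. Second, the error labels are reworked so that errout_idr, errout_hw and errout_mask undo the setup steps in reverse order of how they were performed. Below is a hedged, self-contained C sketch of that unwind shape; the setup_*/undo_* names are illustrative only and merely mirror the roles of fl_hw_replace_filter(), idr_alloc_u32() and rhashtable_insert_fast().

/* Illustrative sketch of goto-based unwinding in reverse setup order;
 * not the kernel code, just the shape the new error path follows. */
#include <stdio.h>

static int setup_hw(void)   { return 0; }	/* sleeping step, done first */
static void undo_hw(void)   { }
static int setup_idr(void)  { return 0; }	/* handle allocation */
static void undo_idr(void)  { }
static int setup_hash(void) { return 0; }	/* hashtable insertion */

static int change(void)
{
	int err;

	err = setup_hw();		/* may sleep: no lock held yet */
	if (err)
		goto errout;
	err = setup_idr();
	if (err)
		goto errout_hw;
	err = setup_hash();
	if (err)
		goto errout_idr;
	return 0;

errout_idr:				/* labels fall through, undoing in reverse */
	undo_idr();
errout_hw:
	undo_hw();
errout:
	return err;
}

int main(void)
{
	printf("change: %d\n", change());
	return 0;
}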
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dcf3aee5697e..d36ceb5001f9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1376,73 +1376,75 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
	if (err)
		goto errout;

-	if (!handle) {
-		handle = 1;
-		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-				    INT_MAX, GFP_KERNEL);
-	} else if (!fold) {
-		/* user specifies a handle and it doesn't exist */
-		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-				    handle, GFP_KERNEL);
-	}
-	if (err)
-		goto errout_mask;
-	fnew->handle = handle;
-
-	if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
-		err = -EEXIST;
-		goto errout_idr;
-	}
-
-	err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-				     fnew->mask->filter_ht_params);
-	if (err)
-		goto errout_idr;
-
	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, extack);
		if (err)
-			goto errout_mask_ht;
+			goto errout_mask;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (fold) {
+		fnew->handle = handle;
+
+		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+					     fnew->mask->filter_ht_params);
+		if (err)
+			goto errout_hw;
+
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
-		if (!tc_skip_hw(fold->flags))
-			fl_hw_destroy_filter(tp, fold, NULL);
-	}
-
-	*arg = fnew;
-
-	if (fold) {
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
+
+		if (!tc_skip_hw(fold->flags))
+			fl_hw_destroy_filter(tp, fold, NULL);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
	} else {
+		if (__fl_lookup(fnew->mask, &fnew->mkey)) {
+			err = -EEXIST;
+			goto errout_hw;
+		}
+
+		if (handle) {
+			/* user specifies a handle and it doesn't exist */
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    handle, GFP_ATOMIC);
+		} else {
+			handle = 1;
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    INT_MAX, GFP_ATOMIC);
+		}
+		if (err)
+			goto errout_hw;
+
+		fnew->handle = handle;
+
+		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+					     fnew->mask->filter_ht_params);
+		if (err)
+			goto errout_idr;
+
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
	}

+	*arg = fnew;
+
	kfree(tb);
	kfree(mask);
	return 0;

-errout_mask_ht:
-	rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
-			       fnew->mask->filter_ht_params);
-
errout_idr:
-	if (!fold)
-		idr_remove(&head->handle_idr, fnew->handle);
-
+	idr_remove(&head->handle_idr, fnew->handle);
+errout_hw:
+	if (!tc_skip_hw(fnew->flags))
+		fl_hw_destroy_filter(tp, fnew, NULL);
errout_mask:
	fl_mask_put(head, fnew->mask, false);
-
errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);