author    Guillaume Nault <gnault@redhat.com>    2020-01-13 22:39:23 +0100
committer David S. Miller <davem@davemloft.net>  2020-01-14 11:28:40 -0800
commit    8d7e5dee972f1cde2ba96c621f1541fa36e7d4f4
tree      0f98073e97662ebd7ed7fee89403dbaa8bae13a9
parent    netns: protect netns ID lookups with RCU
netns: don't disable BHs when locking "nsid_lock"
When peernet2id() had to lock "nsid_lock" before iterating through the
nsid table, we had to disable BHs, because VXLAN can call peernet2id()
from the xmit path:

  vxlan_xmit() -> vxlan_fdb_miss() -> vxlan_fdb_notify()
    -> __vxlan_fdb_notify() -> vxlan_fdb_info() -> peernet2id().

Now that peernet2id() uses RCU protection, "nsid_lock" isn't used in BH
context anymore. Therefore, we can safely use plain
spin_lock()/spin_unlock() and let BHs run when holding "nsid_lock".

Signed-off-by: Guillaume Nault <gnault@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
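For context, a minimal sketch (illustrative only, not part of this patch;
"example_lock" and the two functions are hypothetical) of why a lock that
can also be taken from BH context must use the _bh variants, and why the
plain variants suffice once no BH path takes the lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical, for illustration */

/* Before the parent commit: the lock was also taken from BH (softirq)
 * context via the VXLAN xmit path. Process context had to disable BHs
 * while holding it; otherwise a softirq on the same CPU could interrupt
 * the critical section and spin forever on the lock it already holds.
 */
static void holder_with_bh_users(void)
{
	spin_lock_bh(&example_lock);	/* disable BHs, then take the lock */
	/* ... critical section ... */
	spin_unlock_bh(&example_lock);	/* drop the lock, re-enable BHs */
}

/* After: the BH-context reader moved to RCU, so no softirq can contend
 * for the lock, and the plain variants are safe (and let BHs run while
 * the lock is held).
 */
static void holder_without_bh_users(void)
{
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
}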
 net/core/net_namespace.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e7a5ff4966c9..6412c1fbfcb5 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -237,10 +237,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
if (refcount_read(&net->count) == 0)
return NETNSA_NSID_NOT_ASSIGNED;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock(&net->nsid_lock);
id = __peernet2id(net, peer);
if (id >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
return id;
}
@@ -250,12 +250,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
* just been idr_remove()'d from there in cleanup_net().
*/
if (!maybe_get_net(peer)) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
return NETNSA_NSID_NOT_ASSIGNED;
}
id = alloc_netid(net, peer, -1);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
put_net(peer);
if (id < 0)
@@ -520,20 +520,20 @@ static void unhash_nsid(struct net *net, struct net *last)
for_each_net(tmp) {
int id;
- spin_lock_bh(&tmp->nsid_lock);
+ spin_lock(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
+ spin_unlock(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
GFP_KERNEL);
if (tmp == last)
break;
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
}
static LLIST_HEAD(cleanup_list);
@@ -746,9 +746,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(peer);
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock(&net->nsid_lock);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
err = -EEXIST;
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack,
@@ -757,7 +757,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
nlh, GFP_KERNEL);
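For reference, this conversion is only safe because of the parent commit
("netns: protect netns ID lookups with RCU"): the one BH-context caller,
peernet2id() on the VXLAN xmit path, now performs its lookup under
rcu_read_lock() instead of taking "nsid_lock". A rough sketch of that
read side, paraphrased from the parent commit (approximate, return-value
handling elided):

	rcu_read_lock();
	id = __peernet2id(net, peer);	/* idr lookup on the RCU read side */
	rcu_read_unlock();

With no reader left in softirq context, "nsid_lock" only serializes
process-context writers, which is exactly what plain spin_lock() covers.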