Diffstat (limited to 'net/core/net_namespace.c')
 net/core/net_namespace.c | 83 ++++++++++++++++++++++++-----------
 1 file changed, 58 insertions(+), 25 deletions(-)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 60a71be75aea..3cad5f51afd3 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -35,7 +35,7 @@ LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net = {
- .count = ATOMIC_INIT(1),
+ .count = REFCOUNT_INIT(1),
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
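
The hunk above converts init_net's lifetime counter from atomic_t to refcount_t. Unlike a bare atomic_t, a refcount_t saturates rather than wrapping on overflow and warns on increment-from-zero, turning refcount bugs into loud warnings instead of silent use-after-free. A minimal sketch of the API in play, using a hypothetical struct foo (the refcount.h calls themselves are real):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t count;	/* object lifetime reference count */
	};

	static void foo_get(struct foo *f)
	{
		refcount_inc(&f->count);	/* WARNs if count was already zero */
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->count))
			kfree(f);		/* last reference dropped */
	}
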
@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
- bool alloc;
+ bool alloc = false, alive = false;
int id;
- if (atomic_read(&net->count) == 0)
+ if (refcount_read(&net->count) == 0)
return NETNSA_NSID_NOT_ASSIGNED;
spin_lock_bh(&net->nsid_lock);
- alloc = atomic_read(&peer->count) == 0 ? false : true;
+ /*
+ * When peer is obtained from an RCU list, we may race with
+ * its cleanup. Checking whether it is still alive guarantees
+ * that we never hash a peer back into net->netns_ids after it
+ * has just been idr_remove()'d from there in cleanup_net().
+ */
+ if (maybe_get_net(peer))
+ alive = alloc = true;
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
+ if (alive)
+ put_net(peer);
return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
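
peernet2id_alloc() now refuses to hash a dying peer by first trying to take a real reference. For context, maybe_get_net() lives in include/net/net_namespace.h and, after the refcount_t conversion, is essentially the following (a close paraphrase, not part of this patch):

	static inline struct net *maybe_get_net(struct net *net)
	{
		/* Take a reference only if the netns is still alive;
		 * refcount_inc_not_zero() fails once count has reached
		 * zero, i.e. once the netns is queued for cleanup_net().
		 */
		if (!refcount_inc_not_zero(&net->count))
			net = NULL;
		return net;
	}

When it succeeds, the matching put_net(peer) at the end of peernet2id_alloc() drops the temporary reference again.
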
@@ -264,11 +273,9 @@ struct net *get_net_ns_by_id(struct net *net, int id)
return NULL;
rcu_read_lock();
- spin_lock_bh(&net->nsid_lock);
peer = idr_find(&net->netns_ids, id);
if (peer)
peer = maybe_get_net(peer);
- spin_unlock_bh(&net->nsid_lock);
rcu_read_unlock();
return peer;
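
Dropping nsid_lock in get_net_ns_by_id() is safe because idr_find() may be called under rcu_read_lock(), and maybe_get_net() rejects a peer whose refcount has already hit zero. The general RCU lookup pattern this hunk relies on, as an illustrative sketch with hypothetical names:

	struct foo {
		refcount_t count;
	};

	static DEFINE_IDR(foo_idr);

	static struct foo *foo_lookup(int id)
	{
		struct foo *f;

		rcu_read_lock();
		f = idr_find(&foo_idr, id);	/* lookup is RCU-safe */
		if (f && !refcount_inc_not_zero(&f->count))
			f = NULL;		/* raced with teardown */
		rcu_read_unlock();
		return f;
	}

The pattern requires that the object only be freed after an RCU grace period, so the pointer returned by idr_find() stays valid for the duration of the read-side critical section.
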
@@ -284,7 +291,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
int error = 0;
LIST_HEAD(net_exit_list);
- atomic_set(&net->count, 1);
+ refcount_set(&net->count, 1);
refcount_set(&net->passive, 1);
net->dev_base_seq = 1;
net->user_ns = user_ns;
@@ -432,13 +439,40 @@ struct net *copy_net_ns(unsigned long flags,
return net;
}
+static void unhash_nsid(struct net *net, struct net *last)
+{
+ struct net *tmp;
+ /* This function is only called from the cleanup_net() work,
+ * and that work is the only context that may delete a net
+ * from net_namespace_list. So, while the code below is
+ * executing, the list may only grow. Thus, we do not need
+ * for_each_net_rcu() or rtnl_lock().
+ */
+ for_each_net(tmp) {
+ int id;
+
+ spin_lock_bh(&tmp->nsid_lock);
+ id = __peernet2id(tmp, net);
+ if (id >= 0)
+ idr_remove(&tmp->netns_ids, id);
+ spin_unlock_bh(&tmp->nsid_lock);
+ if (id >= 0)
+ rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+ if (tmp == last)
+ break;
+ }
+ spin_lock_bh(&net->nsid_lock);
+ idr_destroy(&net->netns_ids);
+ spin_unlock_bh(&net->nsid_lock);
+}
+
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
- struct net *net, *tmp;
+ struct net *net, *tmp, *last;
struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
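
The comment in the new unhash_nsid() helper distinguishes the plain list walk from its RCU-protected variant. Both iterators are thin wrappers in include/net/net_namespace.h, roughly:

	#define for_each_net(VAR)				\
		list_for_each_entry(VAR, &net_namespace_list, list)

	#define for_each_net_rcu(VAR)				\
		list_for_each_entry_rcu(VAR, &net_namespace_list, list)

The unprotected form is valid here only because cleanup_net() is the sole writer that removes entries from net_namespace_list, so the list can only grow while unhash_nsid() walks it.
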
@@ -451,26 +485,25 @@ static void cleanup_net(struct work_struct *work)
/* Don't let anyone else find us. */
rtnl_lock();
- list_for_each_entry(net, &net_kill_list, cleanup_list) {
+ list_for_each_entry(net, &net_kill_list, cleanup_list)
list_del_rcu(&net->list);
- list_add_tail(&net->exit_list, &net_exit_list);
- for_each_net(tmp) {
- int id;
-
- spin_lock_bh(&tmp->nsid_lock);
- id = __peernet2id(tmp, net);
- if (id >= 0)
- idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
- if (id >= 0)
- rtnl_net_notifyid(tmp, RTM_DELNSID, id);
- }
- spin_lock_bh(&net->nsid_lock);
- idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ /* Cache the last net. After we unlock rtnl, no net newly
+ * added to net_namespace_list can assign an nsid to a net
+ * from net_kill_list (see peernet2id_alloc()), so those
+ * nets may safely be skipped in unhash_nsid().
+ *
+ * Note that unhash_nsid() does not delete the nsid links
+ * between net_kill_list's nets, as they have already been
+ * removed from net_namespace_list. That would be useless
+ * anyway, since their netns_ids are destroyed there.
+ */
+ last = list_last_entry(&net_namespace_list, struct net, list);
+ rtnl_unlock();
+ list_for_each_entry(net, &net_kill_list, cleanup_list) {
+ unhash_nsid(net, last);
+ list_add_tail(&net->exit_list, &net_exit_list);
}
- rtnl_unlock();
/*
* Another CPU might be rcu-iterating the list, wait for it.
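
The restructuring of cleanup_net() is the heart of the patch: deletion from net_namespace_list (under rtnl_lock) is now separated from the nsid unhashing, which runs lock-free but is bounded by the cached last entry. The generic shape of that trick, as an illustrative sketch with hypothetical names:

	struct item {
		struct list_head list;
	};

	static LIST_HEAD(items);
	static DEFINE_MUTEX(items_lock);

	static void process(struct item *it);	/* hypothetical per-entry work */

	static void walk_up_to_snapshot(void)
	{
		struct item *it, *last;

		/* Snapshot the current tail under the lock. The list must
		 * not be empty; in cleanup_net() this holds because
		 * init_net is never removed from net_namespace_list.
		 */
		mutex_lock(&items_lock);
		last = list_last_entry(&items, struct item, list);
		mutex_unlock(&items_lock);

		/* Walk without the lock, deliberately ignoring entries
		 * appended after the unlock. This is safe only while no
		 * other context can remove entries concurrently.
		 */
		list_for_each_entry(it, &items, list) {
			process(it);
			if (it == last)
				break;
		}
	}
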