author    Ying Xue <ying.xue@windriver.com>    2014-05-05 08:56:09 +0800
committer David S. Miller <davem@davemloft.net>    2014-05-05 17:26:43 -0400
commit    5356f3d7d48af72eb2a14b643d5563f068c44fe0 (patch)
tree      e0ad9803f7fbf71b467ea9bb3e24f64dc673939a /net/tipc
parent    net: via-rhine: Convert #ifdef USE_MMIO to a runtime flag (diff)
tipc: always use tipc_node_lock() to hold node lock
Although we acquire the node lock with tipc_node_lock() in most cases, there are still a few places where we grab it directly through the native spinlock interface. Since we will do more work when the node lock is released in the future, we should ensure that tipc_node_lock() and tipc_node_unlock() are always used to take and release it.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
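For context, a minimal sketch of what such wrappers can look like; the bodies below are illustrative only, and the actual definitions live in net/tipc/node.h and may differ:

/* Illustrative sketch, not the exact kernel source: thin wrappers
 * around the per-node spinlock, so that later work can be attached
 * to the unlock path at one single point instead of at every raw
 * spin_unlock_bh() call site.
 */
static inline void tipc_node_lock(struct tipc_node *n_ptr)
{
	spin_lock_bh(&n_ptr->lock);
}

static inline void tipc_node_unlock(struct tipc_node *n_ptr)
{
	/* A future patch can run actions deferred while the lock
	 * was held here, before or after releasing it.
	 */
	spin_unlock_bh(&n_ptr->lock);
}

With every call site converted, as in the diff below, such a change touches only these two helpers.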
Diffstat (limited to 'net/tipc')
-rw-r--r--net/tipc/link.c12
-rw-r--r--net/tipc/name_distr.c6
2 files changed, 9 insertions, 9 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c723ee90219d..3a801452643d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -297,14 +297,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
- spin_lock_bh(&n_ptr->lock);
+ tipc_node_lock(n_ptr);
l_ptr = n_ptr->links[bearer_id];
if (l_ptr) {
tipc_link_reset(l_ptr);
if (shutting_down || !tipc_node_is_up(n_ptr)) {
tipc_node_detach_link(l_ptr->owner, l_ptr);
tipc_link_reset_fragments(l_ptr);
- spin_unlock_bh(&n_ptr->lock);
+ tipc_node_unlock(n_ptr);
/* Nobody else can access this link now: */
del_timer_sync(&l_ptr->timer);
@@ -312,12 +312,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
} else {
/* Detach/delete when failover is finished: */
l_ptr->flags |= LINK_STOPPED;
- spin_unlock_bh(&n_ptr->lock);
+ tipc_node_unlock(n_ptr);
del_timer_sync(&l_ptr->timer);
}
continue;
}
- spin_unlock_bh(&n_ptr->lock);
+ tipc_node_unlock(n_ptr);
}
rcu_read_unlock();
}
@@ -474,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
- spin_lock_bh(&n_ptr->lock);
+ tipc_node_lock(n_ptr);
l_ptr = n_ptr->links[bearer_id];
if (l_ptr)
tipc_link_reset(l_ptr);
- spin_unlock_bh(&n_ptr->lock);
+ tipc_node_unlock(n_ptr);
}
rcu_read_unlock();
}
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 974a73f3d876..8465263246c3 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -135,18 +135,18 @@ void named_cluster_distribute(struct sk_buff *buf)
rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
- spin_lock_bh(&n_ptr->lock);
+ tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[n_ptr->addr & 1];
if (l_ptr) {
buf_copy = skb_copy(buf, GFP_ATOMIC);
if (!buf_copy) {
- spin_unlock_bh(&n_ptr->lock);
+ tipc_node_unlock(n_ptr);
break;
}
msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
__tipc_link_xmit(l_ptr, buf_copy);
}
- spin_unlock_bh(&n_ptr->lock);
+ tipc_node_unlock(n_ptr);
}
rcu_read_unlock();