path: root/net/tipc/node.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2019-05-07 22:03:58 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-07 22:03:58 -0700
commit    80f232121b69cc69a31ccb2b38c1665d770b0710 (patch)
tree      106263eac4ff03b899df695e00dd11e593e74fe2 /net/tipc/node.c
parent    Merge tag 'devicetree-for-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux (diff)
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support AES128-CCM ciphers in kTLS, from Vakul Garg.

   2) Add fib_sync_mem to control the amount of dirty memory we allow to
      queue up between synchronize RCU calls, from David Ahern.

   3) Make flow classifier more lockless, from Vlad Buslov.

   4) Add PHY downshift support to aquantia driver, from Heiner
      Kallweit.

   5) Add SKB cache for TCP rx and tx, from Eric Dumazet. This reduces
      contention on SLAB spinlocks in heavy RPC workloads.

   6) Partial GSO offload support in XFRM, from Boris Pismenny.

   7) Add fast link down support to ethtool, from Heiner Kallweit.

   8) Use siphash for IP ID generator, from Eric Dumazet.

   9) Pull nexthops even further out from ipv4/ipv6 routes and FIB
      entries, from David Ahern.

  10) Move skb->xmit_more into a per-cpu variable, from Florian
      Westphal.

  11) Improve eBPF verifier speed and increase maximum program size,
      from Alexei Starovoitov.

  12) Eliminate per-bucket spinlocks in rhashtable, and instead use bit
      spinlocks. From Neil Brown.

  13) Allow tunneling with GUE encap in ipvs, from Jacky Hu.

  14) Improve link partner cap detection in generic PHY code, from
      Heiner Kallweit.

  15) Add layer 2 encap support to bpf_skb_adjust_room(), from Alan
      Maguire.

  16) Remove SKB list implementation assumptions in SCTP, yours truly.

  17) Various cleanups, optimizations, and simplifications in r8169
      driver. From Heiner Kallweit.

  18) Add memory accounting on TX and RX path of SCTP, from Xin Long.

  19) Switch PHY drivers over to use dynamic feature detection, from
      Heiner Kallweit.

  20) Support flow steering without masking in dpaa2-eth, from Ioana
      Ciocoi.

  21) Implement ndo_get_devlink_port in netdevsim driver, from Jiri
      Pirko.

  22) Increase the strict parsing of current and future netlink
      attributes, also export such policies to userspace. From Johannes
      Berg.

  23) Allow DSA tag drivers to be modular, from Andrew Lunn.

  24) Remove legacy DSA probing support, also from Andrew Lunn.

  25) Allow ll_temac driver to be used on non-x86 platforms, from Esben
      Haabendal.

  26) Add a generic tracepoint for TX queue timeouts to ease debugging,
      from Cong Wang.

  27) More indirect call optimizations, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1763 commits)
  cxgb4: Fix error path in cxgb4_init_module
  net: phy: improve pause mode reporting in phy_print_status
  dt-bindings: net: Fix a typo in the phy-mode list for ethernet bindings
  net: macb: Change interrupt and napi enable order in open
  net: ll_temac: Improve error message on error IRQ
  net/sched: remove block pointer from common offload structure
  net: ethernet: support of_get_mac_address new ERR_PTR error
  net: usb: smsc: fix warning reported by kbuild test robot
  staging: octeon-ethernet: Fix of_get_mac_address ERR_PTR check
  net: dsa: support of_get_mac_address new ERR_PTR error
  net: dsa: sja1105: Fix status initialization in sja1105_get_ethtool_stats
  vrf: sit mtu should not be updated when vrf netdev is the link
  net: dsa: Fix error cleanup path in dsa_init_module
  l2tp: Fix possible NULL pointer dereference
  taprio: add null check on sched_nest to avoid potential null pointer dereference
  net: mvpp2: cls: fix less than zero check on a u32 variable
  net_sched: sch_fq: handle non connected flows
  net_sched: sch_fq: do not assume EDT packets are ordered
  net: hns3: use devm_kcalloc when allocating desc_cb
  net: hns3: some cleanup for struct hns3_enet_ring
  ...
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c  119
1 file changed, 90 insertions(+), 29 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index dd3b6dc17662..9e106d3ed187 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -375,14 +375,20 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
if (n->capabilities == capabilities)
goto exit;
/* Same node may come back with new capabilities */
- write_lock_bh(&n->lock);
+ tipc_node_write_lock(n);
n->capabilities = capabilities;
for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
l = n->links[bearer_id].link;
if (l)
tipc_link_update_caps(l, capabilities);
}
- write_unlock_bh(&n->lock);
+ tipc_node_write_unlock_fast(n);
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
goto exit;
}
n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -433,6 +439,11 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
break;
}
list_add_tail_rcu(&n->list, &temp_node->list);
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
trace_tipc_node_create(n, true, " ");
exit:
spin_unlock_bh(&tn->node_list_lock);
@@ -589,6 +600,7 @@ static void tipc_node_clear_links(struct tipc_node *node)
*/
static bool tipc_node_cleanup(struct tipc_node *peer)
{
+ struct tipc_node *temp_node;
struct tipc_net *tn = tipc_net(peer->net);
bool deleted = false;
@@ -604,6 +616,13 @@ static bool tipc_node_cleanup(struct tipc_node *peer)
deleted = true;
}
tipc_node_write_unlock(peer);
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+
spin_unlock_bh(&tn->node_list_lock);
return deleted;
}
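The "Calculate cluster capabilities" loop added above now runs at three sites (a returning node updating its capabilities, creation of a brand-new node, and node cleanup): under tn->node_list_lock, tn->capabilities is reset to the full local mask and then AND-reduced over every node on the list, so a capability is only assumed cluster-wide if every known peer advertises it. A minimal standalone sketch of that reduction, with assumed simplified types (a plain array standing in for the kernel's RCU node list):

#include <stdint.h>
#include <stddef.h>

#define TIPC_NODE_CAPABILITIES 0x1fff  /* hypothetical full local mask */

struct node {
	uint16_t capabilities;
};

/* Intersect capabilities: a bit missing on any one node is dropped. */
static uint16_t cluster_capabilities(const struct node *nodes, size_t n)
{
	uint16_t caps = TIPC_NODE_CAPABILITIES;

	for (size_t i = 0; i < n; i++)
		caps &= nodes[i].capabilities;
	return caps;
}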
@@ -695,7 +714,6 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
*slot0 = bearer_id;
*slot1 = bearer_id;
tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
- n->failover_sent = false;
n->action_flags |= TIPC_NOTIFY_NODE_UP;
tipc_link_set_active(nl, true);
tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -738,6 +756,45 @@ static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
}
/**
+ * tipc_node_link_failover() - start failover in case of "half-failover"
+ *
+ * This function is only called in the very special situation where link
+ * failover may already have been started on the peer node, but not on
+ * this node. This can happen when e.g.:
+ * 1. Both links <1A-2A>, <1B-2B> down
+ * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
+ * disturbance, wrong session, etc.)
+ * 3. Link <1B-2B> up
+ * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
+ * 5. Node B starts failover onto link <1B-2B>
+ *
+ * ==> Node A never starts link/node failover!
+ *
+ * @n: tipc node structure
+ * @l: link peer endpoint that is failing over (can be NULL)
+ * @tnl: tunnel link
+ * @xmitq: queue for messages to be transmitted on the tunnel link later
+ */
+static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
+ struct tipc_link *tnl,
+ struct sk_buff_head *xmitq)
+{
+ /* Avoid a "self-failover", which can never end */
+ if (!tipc_link_is_up(tnl))
+ return;
+
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+
+ n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
+ tipc_link_failover_prepare(l, tnl, xmitq);
+
+ if (l)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+}
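A note on the sync point arithmetic above: tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1) places the failover sync point just under half the 16-bit sequence space ahead of the tunnel link's next expected packet, and later comparisons such as less(syncpt, n->sync_point) must remain correct across the wrap at 65535. A minimal sketch of that kind of wraparound-safe comparison (the helper name and exact form here are assumptions; TIPC keeps equivalent helpers in its own headers):

#include <stdint.h>

/*
 * "a precedes b" over a 16-bit sequence space: true when a is less
 * than half the space (32768) behind b, even across the 65535 -> 0 wrap.
 */
static inline int seq16_less(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

/*
 * Example: seq16_less(65530, 5) == 1, because 65530 precedes 5 once
 * the counter wraps, while seq16_less(5, 65530) == 0.
 */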
+
+/**
* __tipc_node_link_down - handle loss of link
*/
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
@@ -1340,7 +1397,7 @@ static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
if (!attrs)
goto msg_full;
@@ -1656,14 +1713,16 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
+
/* If parallel link was already down, and this happened before
- * the tunnel link came up, FAILOVER was never sent. Ensure that
- * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+ * the tunnel link came up, node failover was never started.
+ * Ensure that a FAILOVER_MSG is sent to get the peer out of
+ * NODE_FAILINGOVER state; this node must also accept
+ * TUNNEL_MSGs from the peer.
*/
- if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
- tipc_link_create_dummy_tnl_msg(l, xmitq);
- n->failover_sent = true;
- }
+ if (n->state != NODE_FAILINGOVER)
+ tipc_node_link_failover(n, pl, l, xmitq);
+
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
n->sync_point = syncpt;
@@ -1866,9 +1925,9 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_NET])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
- info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
- info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy, info->extack);
if (err)
return err;
@@ -2024,9 +2083,9 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
- info->attrs[TIPC_NLA_LINK],
- tipc_nl_link_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
if (err)
return err;
@@ -2100,9 +2159,9 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
- info->attrs[TIPC_NLA_LINK],
- tipc_nl_link_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
if (err)
return err;
@@ -2165,9 +2224,9 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
- info->attrs[TIPC_NLA_LINK],
- tipc_nl_link_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
if (err)
return err;
@@ -2305,9 +2364,10 @@ int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_MON])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
- info->attrs[TIPC_NLA_MON],
- tipc_nl_monitor_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
+ info->attrs[TIPC_NLA_MON],
+ tipc_nl_monitor_policy,
+ info->extack);
if (err)
return err;
@@ -2334,7 +2394,7 @@ static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
if (!attrs)
goto msg_full;
@@ -2425,9 +2485,10 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
if (!attrs[TIPC_NLA_MON])
return -EINVAL;
- err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
- attrs[TIPC_NLA_MON],
- tipc_nl_monitor_policy, NULL);
+ err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
+ attrs[TIPC_NLA_MON],
+ tipc_nl_monitor_policy,
+ NULL);
if (err)
return err;
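The nla_parse_nested() -> nla_parse_nested_deprecated() and nla_nest_start() -> nla_nest_start_noflag() conversions repeated through this diff belong to the strict netlink validation work mentioned in highlight 22 of the merge message: after the rename, nla_parse_nested() performs strict attribute validation (and expects the NLA_F_NESTED flag), while the _deprecated variant preserves the old lenient behavior for established interfaces such as TIPC's genetlink family. A sketch of the resulting call-site pattern (illustrative only, not a standalone compilable unit; the handler name is made up, the nla_* calls are the real kernel API):

#include <net/genetlink.h>

/* Hypothetical handler showing the parsing pattern used above. */
static int tipc_example_parse(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	/*
	 * Lenient parsing, as converted in the hunks above; a new
	 * netlink family would call nla_parse_nested() instead and
	 * get strict validation.
	 */
	return nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					   info->attrs[TIPC_NLA_MON],
					   tipc_nl_monitor_policy,
					   info->extack);
}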