author     Florian Westphal <fw@strlen.de>            2020-03-27 14:48:50 -0700
committer  David S. Miller <davem@davemloft.net>      2020-03-29 22:14:49 -0700
commit     fc518953bc9c8d7d33c6ab261995f5038f3c87f9 (patch)
tree       9c4eebd575e693916feb8d234411d08a17ec430a /net/mptcp/subflow.c
parent     mptcp: allow dumping subflow context to userspace (diff)
mptcp: add and use MIB counter infrastructure
Exported via the same /proc file as the Linux TCP MIB counters, so
"netstat -s" or "nstat" will show them automatically.

The MPTCP MIB counters are allocated in a distinct pcpu area in order
to avoid bloating/wasting TCP pcpu memory.

Counters are allocated once the first MPTCP socket is created in a
network namespace and freed on exit. If no sockets have been
allocated, all-zero mptcp counters are shown.

The MIB counter list is taken from the multipath-tcp.org kernel, but
only a few counters have been picked up so far. The counter list can
be increased at any time later on.

v2 -> v3:
 - remove 'inline' in foo.c files (David S. Miller)

Co-developed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
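The mib.h included by this diff is not itself shown here. As a rough
sketch only, the counters used below imply plumbing along the
following lines, built on the kernel's existing <net/snmp.h> helpers;
the enum entries are taken from this diff, while the struct layout,
the netns field name and the macro body are assumptions:

/* Sketch only: the real net/mptcp/mib.h from this patch is not part
 * of this file's diff. Enum entries match the counters used below;
 * everything else is an assumption based on standard kernel SNMP MIB
 * plumbing.
 */
#include <net/snmp.h>

enum linux_mptcp_mib_field {
        MPTCP_MIB_NUM = 0,
        MPTCP_MIB_MPCAPABLEPASSIVE,     /* received SYN with MP_CAPABLE */
        MPTCP_MIB_JOINSYNRX,            /* received a SYN + MP_JOIN */
        MPTCP_MIB_JOINSYNACKRX,         /* received a SYN/ACK + MP_JOIN */
        MPTCP_MIB_JOINACKRX,            /* received an ACK + MP_JOIN */
        MPTCP_MIB_JOINNOTOKEN,          /* token in MP_JOIN matched no socket */
        MPTCP_MIB_JOINACKMAC,           /* HMAC on an MP_JOIN (syn)ack was invalid */
        MPTCP_MIB_DSSNOMATCH,           /* new mapping did not match queued data */
        MPTCP_MIB_INFINITEMAPRX,        /* received an infinite mapping */
        __MPTCP_MIB_MAX
};

struct mptcp_mib {
        unsigned long mibs[__MPTCP_MIB_MAX];
};

/* Counters live in a per-netns pcpu area distinct from the TCP MIBs,
 * so namespaces that never create an MPTCP socket pay nothing. The
 * NULL check covers the lazy allocation described in the commit
 * message; "mptcp_statistics" as the struct netns_mib member name is
 * an assumption.
 */
#define MPTCP_INC_STATS(net, field)                                     \
        do {                                                            \
                if ((net)->mib.mptcp_statistics)                        \
                        SNMP_INC_STATS((net)->mib.mptcp_statistics,     \
                                       field);                          \
        } while (0)

With that in place, the SUBFLOW_REQ_INC_STATS() wrapper added in the
first hunk below only has to resolve the struct net from the request
socket before bumping the per-cpu counter.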
Diffstat (limited to 'net/mptcp/subflow.c')
-rw-r--r--    net/mptcp/subflow.c    33 +++++++++++++++++++++++++++------
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index c051db074708..b5180c81588e 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -20,6 +20,13 @@
 #endif
 #include <net/mptcp.h>
 #include "protocol.h"
+#include "mib.h"
+
+static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
+                                  enum linux_mptcp_mib_field field)
+{
+        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
+}
 
 static int subflow_rebuild_header(struct sock *sk)
 {
@@ -88,8 +95,7 @@ static bool subflow_token_join_request(struct request_sock *req,
 
         msk = mptcp_token_get_sock(subflow_req->token);
         if (!msk) {
-                pr_debug("subflow_req=%p, token=%u - not found\n",
-                         subflow_req, subflow_req->token);
+                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                 return false;
         }
@@ -137,8 +143,14 @@ static void subflow_init_req(struct request_sock *req,
                 return;
 #endif
 
-        if (rx_opt.mptcp.mp_capable && rx_opt.mptcp.mp_join)
-                return;
+        if (rx_opt.mptcp.mp_capable) {
+                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+
+                if (rx_opt.mptcp.mp_join)
+                        return;
+        } else if (rx_opt.mptcp.mp_join) {
+                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+        }
 
         if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
                 int err;
@@ -237,6 +249,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                          subflow, subflow->thmac,
                          subflow->remote_nonce);
                 if (!subflow_thmac_valid(subflow)) {
+                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
                         subflow->mp_join = 0;
                         goto do_reset;
                 }
@@ -253,6 +266,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                         goto do_reset;
 
                 subflow->conn_finished = 1;
+                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
         } else {
 do_reset:
                 tcp_send_active_reset(sk, GFP_ATOMIC);
@@ -382,8 +396,10 @@ create_msk:
                 opt_rx.mptcp.mp_join = 0;
                 mptcp_get_options(skb, &opt_rx);
                 if (!opt_rx.mptcp.mp_join ||
-                    !subflow_hmac_valid(req, &opt_rx))
+                    !subflow_hmac_valid(req, &opt_rx)) {
+                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                         return NULL;
+                }
         }
 
 create_child:
@@ -420,6 +436,8 @@ create_child:
                         ctx->conn = (struct sock *)owner;
                         if (!mptcp_finish_join(child))
                                 goto close_child;
+
+                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                 }
         }
@@ -535,6 +553,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
         data_len = mpext->data_len;
         if (data_len == 0) {
                 pr_err("Infinite mapping not handled");
+                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                 return MAPPING_INVALID;
         }
@@ -578,8 +597,10 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
         /* If this skb data are fully covered by the current mapping,
          * the new map would need caching, which is not supported
          */
-        if (skb_is_fully_mapped(ssk, skb))
+        if (skb_is_fully_mapped(ssk, skb)) {
+                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                 return MAPPING_INVALID;
+        }
 
         /* will validate the next map after consuming the current one */
         return MAPPING_OK;
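The commit message says all-zero counters are shown in namespaces
that never created an MPTCP socket. That behavior suggests a /proc
seq handler roughly like the sketch below; mptcp_seq_show,
mptcp_snmp_list and net->mib.mptcp_statistics are assumed names
(this file's diff does not contain them), while SNMP_MIB_ITEM,
SNMP_MIB_SENTINEL and snmp_fold_field() are existing kernel helpers:

/* Sketch of the /proc display implied by the commit message; not the
 * patch's actual net/mptcp/mib.c.
 */
#include <net/ip.h>
#include <net/snmp.h>
#include <linux/seq_file.h>

static const struct snmp_mib mptcp_snmp_list[] = {
        SNMP_MIB_ITEM("MPCapableSYNRX", MPTCP_MIB_MPCAPABLEPASSIVE),
        SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
        SNMP_MIB_ITEM("DSSNoMatch", MPTCP_MIB_DSSNOMATCH),
        SNMP_MIB_SENTINEL
};

void mptcp_seq_show(struct seq_file *seq)
{
        struct net *net = seq->private;
        int i;

        seq_puts(seq, "MPTcpExt:");
        for (i = 0; mptcp_snmp_list[i].name; i++) {
                /* The pcpu area is allocated on the first MPTCP socket
                 * in this netns; until then, print all-zero counters.
                 */
                if (!net->mib.mptcp_statistics)
                        seq_puts(seq, " 0");
                else
                        seq_printf(seq, " %lu",
                                   snmp_fold_field(net->mib.mptcp_statistics,
                                                   mptcp_snmp_list[i].entry));
        }
        seq_putc(seq, '\n');
}

Since the output shares the TCP MIB /proc file, "nstat" and
"netstat -s" pick the new counters up with no userspace changes, as
the commit message notes.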