Diffstat (limited to 'net/smc')
-rw-r--r--  net/smc/Makefile        2
-rw-r--r--  net/smc/af_smc.c      104
-rw-r--r--  net/smc/smc_core.c     28
-rw-r--r--  net/smc/smc_ism.c       1
-rw-r--r--  net/smc/smc_netlink.c  11
-rw-r--r--  net/smc/smc_netlink.h   2
-rw-r--r--  net/smc/smc_rx.c        8
-rw-r--r--  net/smc/smc_stats.c   413
-rw-r--r--  net/smc/smc_stats.h   266
-rw-r--r--  net/smc/smc_tx.c       23
10 files changed, 818 insertions(+), 40 deletions(-)
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 77e54fe42b1c..99a0186cba5b 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
-smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o
+smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5eff7cccceff..898389611ae8 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -49,6 +49,7 @@
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
+#include "smc_stats.h"
static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
* creation on server
@@ -508,9 +509,44 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->r0.qp_mtu;
}
-static void smc_switch_to_fallback(struct smc_sock *smc)
+static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
+ struct smc_stats_fback *fback_arr)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
+ if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
+ fback_arr[cnt].count++;
+ break;
+ }
+ if (!fback_arr[cnt].fback_code) {
+ fback_arr[cnt].fback_code = smc->fallback_rsn;
+ fback_arr[cnt].count++;
+ break;
+ }
+ }
+}
+
+static void smc_stat_fallback(struct smc_sock *smc)
+{
+ struct net *net = sock_net(&smc->sk);
+
+ mutex_lock(&net->smc.mutex_fback_rsn);
+ if (smc->listen_smc) {
+ smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
+ net->smc.fback_rsn->srv_fback_cnt++;
+ } else {
+ smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
+ net->smc.fback_rsn->clnt_fback_cnt++;
+ }
+ mutex_unlock(&net->smc.mutex_fback_rsn);
+}
+
+static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
smc->use_fallback = true;
+ smc->fallback_rsn = reason_code;
+ smc_stat_fallback(smc);
if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
smc->clcsock->file = smc->sk.sk_socket->file;
smc->clcsock->file->private_data = smc->clcsock;
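
A note on the fallback bookkeeping introduced above: smc_stat_inc_fback_rsn_cnt() keeps a fixed array of SMC_MAX_FBACK_RSN_CNT reason slots per role (server/client). A known reason code bumps its slot, an unknown one claims the first free slot, and once all slots are taken, further new reason codes are dropped silently. A minimal userspace sketch of the same logic (not kernel code; the sample reason values are illustrative):

#include <stdio.h>

#define SMC_MAX_FBACK_RSN_CNT 30

struct smc_stats_fback {
	int fback_code;
	unsigned short count;
};

static void inc_fback_rsn(struct smc_stats_fback *arr, int rsn)
{
	int cnt;

	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
		if (arr[cnt].fback_code == rsn) {	/* known reason */
			arr[cnt].count++;
			return;
		}
		if (!arr[cnt].fback_code) {		/* first free slot */
			arr[cnt].fback_code = rsn;
			arr[cnt].count++;
			return;
		}
	}
	/* all slots taken: the reason is not recorded */
}

int main(void)
{
	struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT] = { { 0 } };
	int samples[] = { 0x03010000, 0x03010000, 0x05000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		inc_fback_rsn(srv, samples[i]);
	for (i = 0; i < SMC_MAX_FBACK_RSN_CNT && srv[i].fback_code; i++)
		printf("rsn 0x%08x: %u\n", srv[i].fback_code, srv[i].count);
	return 0;
}
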
@@ -522,8 +558,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = reason_code;
+ smc_switch_to_fallback(smc, reason_code);
smc_copy_sock_settings_to_clc(smc);
smc->connect_nonblock = 0;
if (smc->sk.sk_state == SMC_INIT)
@@ -535,9 +570,11 @@ static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
u8 version)
{
+ struct net *net = sock_net(&smc->sk);
int rc;
if (reason_code < 0) { /* error, fallback is not possible */
+ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return reason_code;
@@ -545,6 +582,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
if (reason_code != SMC_CLC_DECL_PEERDECL) {
rc = smc_clc_send_decline(smc, reason_code, version);
if (rc < 0) {
+ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return rc;
@@ -992,6 +1030,7 @@ static int __smc_connect(struct smc_sock *smc)
if (rc)
goto vlan_cleanup;
+ SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
smc_connect_ism_vlan_cleanup(smc, ini);
kfree(buf);
kfree(ini);
@@ -1307,7 +1346,9 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
static void smc_listen_out_err(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;
+ struct net *net = sock_net(newsmcsk);
+ this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
@@ -1325,8 +1366,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
smc_listen_out_err(new_smc);
return;
}
- smc_switch_to_fallback(new_smc);
- new_smc->fallback_rsn = reason_code;
+ smc_switch_to_fallback(new_smc, reason_code);
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
smc_listen_out_err(new_smc);
@@ -1699,8 +1739,7 @@ static void smc_listen_work(struct work_struct *work)
/* check if peer is smc capable */
if (!tcp_sk(newclcsock->sk)->syn_smc) {
- smc_switch_to_fallback(new_smc);
- new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
+ smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
smc_listen_out_connected(new_smc);
return;
}
@@ -1778,6 +1817,7 @@ static void smc_listen_work(struct work_struct *work)
}
smc_conn_save_peer_info(new_smc, cclc);
smc_listen_out_connected(new_smc);
+ SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
goto out_free;
out_unlock:
@@ -1984,18 +2024,19 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_FASTOPEN) {
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
goto out;
}
}
- if (smc->use_fallback)
+ if (smc->use_fallback) {
rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
- else
+ } else {
rc = smc_tx_sendmsg(smc, msg, len);
+ SMC_STAT_TX_PAYLOAD(smc, len, rc);
+ }
out:
release_sock(sk);
return rc;
@@ -2030,6 +2071,7 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
} else {
msg->msg_namelen = 0;
rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
+ SMC_STAT_RX_PAYLOAD(smc, rc, rc);
}
out:
@@ -2176,7 +2218,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
optval, optlen);
if (smc->clcsock->sk->sk_err) {
sk->sk_err = smc->clcsock->sk->sk_err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
if (optlen < sizeof(int))
@@ -2194,8 +2236,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
}
@@ -2204,18 +2245,22 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
- if (val)
+ if (val) {
+ SMC_STAT_INC(smc, ndly_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
+ }
}
break;
case TCP_CORK:
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
- if (!val)
+ if (!val) {
+ SMC_STAT_INC(smc, cork_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
+ }
}
break;
case TCP_DEFER_ACCEPT:
@@ -2338,11 +2383,13 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
goto out;
}
release_sock(sk);
- if (smc->use_fallback)
+ if (smc->use_fallback) {
rc = kernel_sendpage(smc->clcsock, page, offset,
size, flags);
- else
+ } else {
+ SMC_STAT_INC(smc, sendpage_cnt);
rc = sock_no_sendpage(sock, page, offset, size, flags);
+ }
out:
return rc;
@@ -2391,6 +2438,7 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
flags = MSG_DONTWAIT;
else
flags = 0;
+ SMC_STAT_INC(smc, splice_cnt);
rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
}
out:
@@ -2479,6 +2527,16 @@ static void __net_exit smc_net_exit(struct net *net)
smc_pnet_net_exit(net);
}
+static __net_init int smc_net_stat_init(struct net *net)
+{
+ return smc_stats_init(net);
+}
+
+static void __net_exit smc_net_stat_exit(struct net *net)
+{
+ smc_stats_exit(net);
+}
+
static struct pernet_operations smc_net_ops = {
.init = smc_net_init,
.exit = smc_net_exit,
@@ -2486,6 +2544,11 @@ static struct pernet_operations smc_net_ops = {
.size = sizeof(struct smc_net),
};
+static struct pernet_operations smc_net_stat_ops = {
+ .init = smc_net_stat_init,
+ .exit = smc_net_stat_exit,
+};
+
static int __init smc_init(void)
{
int rc;
@@ -2494,6 +2557,10 @@ static int __init smc_init(void)
if (rc)
return rc;
+ rc = register_pernet_subsys(&smc_net_stat_ops);
+ if (rc)
+ return rc;
+
smc_ism_init();
smc_clc_init();
@@ -2595,6 +2662,7 @@ static void __exit smc_exit(void)
proto_unregister(&smc_proto);
smc_pnet_exit();
smc_nl_exit();
+ unregister_pernet_subsys(&smc_net_stat_ops);
unregister_pernet_subsys(&smc_net_ops);
rcu_barrier();
}
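
The statistics get their own pernet_operations (smc_net_stat_ops) without .id/.size, since the storage hangs off the existing struct smc_net; only the init/exit callbacks matter here. A minimal sketch of that pernet pattern, with hypothetical demo_* names and error handling trimmed:

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init demo_net_init(struct net *net)
{
	/* allocate per-namespace state, e.g. alloc_percpu() counters
	 * as smc_stats_init() does above
	 */
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	/* free whatever demo_net_init() allocated */
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
};

static int __init demo_init(void)
{
	/* registered once; init/exit then run for every netns */
	return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
	unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
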
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 0df85a12651e..cd0d7c908b2a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -33,6 +33,7 @@
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
+#include "smc_stats.h"
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
@@ -1235,20 +1236,6 @@ static void smc_lgr_free(struct smc_link_group *lgr)
kfree(lgr);
}
-static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
-{
- int i;
-
- for (i = 0; i < SMC_RMBE_SIZES; i++) {
- struct smc_buf_desc *buf_desc;
-
- list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
- buf_desc->len += sizeof(struct smcd_cdc_msg);
- smc_ism_unregister_dmb(lgr->smcd, buf_desc);
- }
- }
-}
-
static void smc_sk_wake_ups(struct smc_sock *smc)
{
smc->sk.sk_write_space(&smc->sk);
@@ -1285,7 +1272,6 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
if (lgr->is_smcd) {
smc_ism_signal_shutdown(lgr);
- smcd_unregister_all_dmbs(lgr);
} else {
u32 rsn = lgr->llc_termination_rsn;
@@ -2044,6 +2030,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
struct smc_link_group *lgr = conn->lgr;
struct list_head *buf_list;
int bufsize, bufsize_short;
+ bool is_dgraded = false;
struct mutex *lock; /* lock buffer list */
int sk_buf_size;
@@ -2071,6 +2058,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
/* check for reusable slot in the link group */
buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
if (buf_desc) {
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+ SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
memset(buf_desc->cpu_addr, 0, bufsize);
break; /* found reusable slot */
}
@@ -2082,9 +2071,16 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
if (PTR_ERR(buf_desc) == -ENOMEM)
break;
- if (IS_ERR(buf_desc))
+ if (IS_ERR(buf_desc)) {
+ if (!is_dgraded) {
+ is_dgraded = true;
+ SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
+ }
continue;
+ }
+ SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
buf_desc->used = 1;
mutex_lock(lock);
list_add(&buf_desc->list, buf_list);
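
In __smc_buf_create() above, the allocation loop starts at the requested size and retries with smaller sizes on failure; the new is_dgraded flag ensures a buffer is counted as downgraded at most once, no matter how many retry steps it takes. A standalone sketch of the loop's shape (userspace stand-in allocator, simplified sizes):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* stand-in for grabbing a buffer slot of the given size */
static void *try_alloc(size_t size)
{
	return malloc(size);
}

static void *alloc_with_downgrade(size_t want, unsigned long *dgrade_cnt)
{
	bool is_dgraded = false;
	size_t size;
	void *buf;

	for (size = want; size >= 8192; size >>= 1) {
		buf = try_alloc(size);
		if (buf)
			return buf;	/* success, possibly at reduced size */
		if (!is_dgraded) {
			is_dgraded = true;	/* count first downgrade only */
			(*dgrade_cnt)++;
		}
	}
	return NULL;
}
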
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 967712ba52a0..9cb2df289963 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -470,7 +470,6 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
mutex_unlock(&smcd_dev_list.mutex);
smcd->going_away = 1;
smc_smcd_terminate_all(smcd);
- flush_workqueue(smcd->event_wq);
destroy_workqueue(smcd->event_wq);
device_del(&smcd->dev);
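
The removed flush_workqueue() call is redundant rather than relocated: destroy_workqueue() drains the queue itself (via drain_workqueue()) before tearing it down, so flushing immediately beforehand adds nothing.
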
diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c
index 140419a19dbf..6fb6f96c1d17 100644
--- a/net/smc/smc_netlink.c
+++ b/net/smc/smc_netlink.c
@@ -19,6 +19,7 @@
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_ib.h"
+#include "smc_stats.h"
#include "smc_netlink.h"
#define SMC_CMD_MAX_ATTR 1
@@ -55,6 +56,16 @@ static const struct genl_ops smc_gen_nl_ops[] = {
/* can be retrieved by unprivileged users */
.dumpit = smcr_nl_get_device,
},
+ {
+ .cmd = SMC_NETLINK_GET_STATS,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smc_nl_get_stats,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_FBACK_STATS,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smc_nl_get_fback_stats,
+ },
};
static const struct nla_policy smc_gen_nl_policy[2] = {
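
Both new commands are dump operations, so userspace drives them with NLM_F_DUMP. A sketch of a reader using libnl-3 (assuming the SMC_GENL_FAMILY_NAME, SMC_GENL_FAMILY_VERSION and SMC_NETLINK_GET_STATS definitions from <linux/smc.h>; attribute parsing and error handling omitted):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/smc.h>

static int stats_cb(struct nl_msg *msg, void *arg)
{
	/* a real reader would nla_parse() the SMC_GEN_STATS nest here */
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, SMC_GENL_FAMILY_NAME);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, stats_cb, NULL);
	genl_send_simple(sk, family, SMC_NETLINK_GET_STATS,
			 SMC_GENL_FAMILY_VERSION, NLM_F_DUMP);
	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}
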
diff --git a/net/smc/smc_netlink.h b/net/smc/smc_netlink.h
index 3477265cba6c..5ce2c0a89ccd 100644
--- a/net/smc/smc_netlink.h
+++ b/net/smc/smc_netlink.h
@@ -18,7 +18,7 @@
extern struct genl_family smc_gen_nl_family;
struct smc_nl_dmp_ctx {
- int pos[2];
+ int pos[3];
};
static inline struct smc_nl_dmp_ctx *smc_nl_dmp_ctx(struct netlink_callback *c)
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index fcfac59f8b72..170b733bc736 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -21,6 +21,7 @@
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"
+#include "smc_stats.h"
/* callback implementation to wakeup consumers blocked with smc_rx_wait().
* indirectly called by smc_cdc_msg_recv_action().
@@ -227,6 +228,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
conn->urg_state == SMC_URG_READ)
return -EINVAL;
+ SMC_STAT_INC(smc, urg_data_cnt);
if (conn->urg_state == SMC_URG_VALID) {
if (!(flags & MSG_PEEK))
smc->conn.urg_state = SMC_URG_READ;
@@ -303,6 +305,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ readable = atomic_read(&conn->bytes_to_rcv);
+ if (readable >= conn->rmb_desc->len)
+ SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);
+
+ if (len < readable)
+ SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;
diff --git a/net/smc/smc_stats.c b/net/smc/smc_stats.c
new file mode 100644
index 000000000000..e80e34f7ac15
--- /dev/null
+++ b/net/smc/smc_stats.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * SMC statistics netlink routines
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Author(s): Guvenc Gulce
+ */
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/ctype.h>
+#include <linux/smc.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include "smc_netlink.h"
+#include "smc_stats.h"
+
+int smc_stats_init(struct net *net)
+{
+ net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
+ if (!net->smc.fback_rsn)
+ goto err_fback;
+ net->smc.smc_stats = alloc_percpu(struct smc_stats);
+ if (!net->smc.smc_stats)
+ goto err_stats;
+ mutex_init(&net->smc.mutex_fback_rsn);
+ return 0;
+
+err_stats:
+ kfree(net->smc.fback_rsn);
+err_fback:
+ return -ENOMEM;
+}
+
+void smc_stats_exit(struct net *net)
+{
+ kfree(net->smc.fback_rsn);
+ if (net->smc.smc_stats)
+ free_percpu(net->smc.smc_stats);
+}
+
+static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
+ struct smc_stats *stats, int tech,
+ int type)
+{
+ struct smc_stats_rmbcnt *stats_rmb_cnt;
+ struct nlattr *attrs;
+
+ if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
+ stats_rmb_cnt = &stats->smc[tech].rmb_tx;
+ else
+ stats_rmb_cnt = &stats->smc[tech].rmb_rx;
+
+ attrs = nla_nest_start(skb, type);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
+ stats_rmb_cnt->reuse_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
+ stats_rmb_cnt->buf_size_small_peer_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
+ stats_rmb_cnt->buf_size_small_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
+ stats_rmb_cnt->buf_full_peer_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
+ stats_rmb_cnt->buf_full_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
+ stats_rmb_cnt->alloc_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
+ stats_rmb_cnt->dgrade_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
+ struct smc_stats *stats, int tech,
+ int type)
+{
+ struct smc_stats_memsize *stats_pload;
+ struct nlattr *attrs;
+
+ if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
+ stats_pload = &stats->smc[tech].tx_pd;
+ else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
+ stats_pload = &stats->smc[tech].rx_pd;
+ else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
+ stats_pload = &stats->smc[tech].tx_rmbsize;
+ else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
+ stats_pload = &stats->smc[tech].rx_rmbsize;
+ else
+ goto errout;
+
+ attrs = nla_nest_start(skb, type);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
+ stats_pload->buf[SMC_BUF_8K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
+ stats_pload->buf[SMC_BUF_16K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
+ stats_pload->buf[SMC_BUF_32K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
+ stats_pload->buf[SMC_BUF_64K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
+ stats_pload->buf[SMC_BUF_128K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
+ stats_pload->buf[SMC_BUF_256K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
+ stats_pload->buf[SMC_BUF_512K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
+ stats_pload->buf[SMC_BUF_1024K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
+ stats_pload->buf[SMC_BUF_G_1024K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
+ struct smc_stats *stats, int tech)
+{
+ struct smc_stats_tech *smc_tech;
+ struct nlattr *attrs;
+
+ smc_tech = &stats->smc[tech];
+ if (tech == SMC_TYPE_D)
+ attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
+ else
+ attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);
+
+ if (!attrs)
+ goto errout;
+ if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
+ SMC_NLA_STATS_T_TX_RMB_STATS))
+ goto errattr;
+ if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
+ SMC_NLA_STATS_T_RX_RMB_STATS))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_TXPLOAD_SIZE))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_RXPLOAD_SIZE))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_TX_RMB_SIZE))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_RX_RMB_SIZE))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
+ smc_tech->clnt_v1_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
+ smc_tech->clnt_v2_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
+ smc_tech->srv_v1_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
+ smc_tech->srv_v2_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
+ smc_tech->rx_bytes,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
+ smc_tech->tx_bytes,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
+ smc_tech->rx_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
+ smc_tech->tx_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
+ smc_tech->sendpage_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
+ smc_tech->cork_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
+ smc_tech->ndly_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
+ smc_tech->splice_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
+ smc_tech->urg_data_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+int smc_nl_get_stats(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ struct smc_stats *stats;
+ struct nlattr *attrs;
+ int cpu, i, size;
+ void *nlh;
+ u64 *src;
+ u64 *sum;
+
+ if (cb_ctx->pos[0])
+ goto errmsg;
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_STATS);
+ if (!nlh)
+ goto errmsg;
+
+ attrs = nla_nest_start(skb, SMC_GEN_STATS);
+ if (!attrs)
+ goto errnest;
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ goto erralloc;
+ size = sizeof(*stats) / sizeof(u64);
+ for_each_possible_cpu(cpu) {
+ src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
+ sum = (u64 *)stats;
+ for (i = 0; i < size; i++)
+ *(sum++) += *(src++);
+ }
+ if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
+ goto errattr;
+ if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
+ stats->clnt_hshake_err_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
+ stats->srv_hshake_err_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ cb_ctx->pos[0] = 1;
+ kfree(stats);
+ return skb->len;
+
+errattr:
+ kfree(stats);
+erralloc:
+ nla_nest_cancel(skb, attrs);
+errnest:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return skb->len;
+}
+
+static int smc_nl_get_fback_details(struct sk_buff *skb,
+ struct netlink_callback *cb, int pos,
+ bool is_srv)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ int cnt_reported = cb_ctx->pos[2];
+ struct smc_stats_fback *trgt_arr;
+ struct nlattr *attrs;
+ int rc = 0;
+ void *nlh;
+
+ if (is_srv)
+ trgt_arr = &net->smc.fback_rsn->srv[0];
+ else
+ trgt_arr = &net->smc.fback_rsn->clnt[0];
+ if (!trgt_arr[pos].fback_code)
+ return -ENODATA;
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_FBACK_STATS);
+ if (!nlh)
+ goto errmsg;
+ attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
+ goto errattr;
+ if (!cnt_reported) {
+ if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
+ net->smc.fback_rsn->srv_fback_cnt,
+ SMC_NLA_FBACK_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
+ net->smc.fback_rsn->clnt_fback_cnt,
+ SMC_NLA_FBACK_STATS_PAD))
+ goto errattr;
+ cnt_reported = 1;
+ }
+
+ if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
+ trgt_arr[pos].fback_code))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
+ trgt_arr[pos].count))
+ goto errattr;
+
+ cb_ctx->pos[2] = cnt_reported;
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ return rc;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ int rc_srv = 0, rc_clnt = 0, k;
+ int skip_serv = cb_ctx->pos[1];
+ int snum = cb_ctx->pos[0];
+ bool is_srv = true;
+
+ mutex_lock(&net->smc.mutex_fback_rsn);
+ for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
+ if (k < snum)
+ continue;
+ if (!skip_serv) {
+ rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
+ if (rc_srv && rc_srv != -ENODATA)
+ break;
+ } else {
+ skip_serv = 0;
+ }
+ rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
+ if (rc_clnt && rc_clnt != -ENODATA) {
+ skip_serv = 1;
+ break;
+ }
+ if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
+ break;
+ }
+ mutex_unlock(&net->smc.mutex_fback_rsn);
+ cb_ctx->pos[1] = skip_serv;
+ cb_ctx->pos[0] = k;
+ return skb->len;
+}
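
smc_nl_get_stats() sums the per-CPU copies by walking struct smc_stats as a flat u64 array; that works because every field is a u64 (or an aggregate of u64s), so the struct's size is an exact multiple of sizeof(u64) with no padding. A plain-C model of that aggregation with a trimmed stats struct and two fake CPUs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_stats {
	uint64_t rx_cnt;
	uint64_t tx_cnt;
	uint64_t rx_bytes;
	uint64_t tx_bytes;
};

int main(void)
{
	struct demo_stats percpu[2] = {
		{ .rx_cnt = 1, .tx_cnt = 2, .rx_bytes = 100, .tx_bytes = 200 },
		{ .rx_cnt = 3, .tx_cnt = 4, .rx_bytes = 300, .tx_bytes = 400 },
	};
	struct demo_stats total;
	size_t size = sizeof(total) / sizeof(uint64_t);
	size_t cpu, i;

	memset(&total, 0, sizeof(total));
	for (cpu = 0; cpu < 2; cpu++) {
		uint64_t *src = (uint64_t *)&percpu[cpu];
		uint64_t *sum = (uint64_t *)&total;

		for (i = 0; i < size; i++)
			*sum++ += *src++;	/* field-blind summation */
	}
	printf("rx_cnt=%llu tx_bytes=%llu\n",
	       (unsigned long long)total.rx_cnt,
	       (unsigned long long)total.tx_bytes);
	return 0;
}
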
diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
new file mode 100644
index 000000000000..84b7ecd8c05c
--- /dev/null
+++ b/net/smc/smc_stats.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Macros for SMC statistics
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Author(s): Guvenc Gulce
+ */
+
+#ifndef NET_SMC_SMC_STATS_H_
+#define NET_SMC_SMC_STATS_H_
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/ctype.h>
+#include <linux/smc.h>
+
+#include "smc_clc.h"
+
+#define SMC_MAX_FBACK_RSN_CNT 30
+
+enum {
+ SMC_BUF_8K,
+ SMC_BUF_16K,
+ SMC_BUF_32K,
+ SMC_BUF_64K,
+ SMC_BUF_128K,
+ SMC_BUF_256K,
+ SMC_BUF_512K,
+ SMC_BUF_1024K,
+ SMC_BUF_G_1024K,
+ SMC_BUF_MAX,
+};
+
+struct smc_stats_fback {
+ int fback_code;
+ u16 count;
+};
+
+struct smc_stats_rsn {
+ struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
+ struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
+ u64 srv_fback_cnt;
+ u64 clnt_fback_cnt;
+};
+
+struct smc_stats_rmbcnt {
+ u64 buf_size_small_peer_cnt;
+ u64 buf_size_small_cnt;
+ u64 buf_full_peer_cnt;
+ u64 buf_full_cnt;
+ u64 reuse_cnt;
+ u64 alloc_cnt;
+ u64 dgrade_cnt;
+};
+
+struct smc_stats_memsize {
+ u64 buf[SMC_BUF_MAX];
+};
+
+struct smc_stats_tech {
+ struct smc_stats_memsize tx_rmbsize;
+ struct smc_stats_memsize rx_rmbsize;
+ struct smc_stats_memsize tx_pd;
+ struct smc_stats_memsize rx_pd;
+ struct smc_stats_rmbcnt rmb_tx;
+ struct smc_stats_rmbcnt rmb_rx;
+ u64 clnt_v1_succ_cnt;
+ u64 clnt_v2_succ_cnt;
+ u64 srv_v1_succ_cnt;
+ u64 srv_v2_succ_cnt;
+ u64 sendpage_cnt;
+ u64 urg_data_cnt;
+ u64 splice_cnt;
+ u64 cork_cnt;
+ u64 ndly_cnt;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 rx_cnt;
+ u64 tx_cnt;
+};
+
+struct smc_stats {
+ struct smc_stats_tech smc[2];
+ u64 clnt_hshake_err_cnt;
+ u64 srv_hshake_err_cnt;
+};
+
+#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
+do { \
+ typeof(_smc_stats) stats = (_smc_stats); \
+ typeof(_tech) t = (_tech); \
+ typeof(_len) l = (_len); \
+ int _pos = fls64((l) >> 13); \
+ typeof(_rc) r = (_rc); \
+ int m = SMC_BUF_MAX - 1; \
+ this_cpu_inc((*stats).smc[t].key ## _cnt); \
+ if (r <= 0) \
+ break; \
+ _pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
+ this_cpu_add((*stats).smc[t].key ## _bytes, r); \
+} \
+while (0)
+
+#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
+do { \
+ typeof(_smc) __smc = _smc; \
+ struct net *_net = sock_net(&__smc->sk); \
+ struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(length) _len = (length); \
+ typeof(rcode) _rc = (rcode); \
+ bool is_smcd = !__smc->conn.lnk; \
+ if (is_smcd) \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
+ else \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
+} \
+while (0)
+
+#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
+do { \
+ typeof(_smc) __smc = _smc; \
+ struct net *_net = sock_net(&__smc->sk); \
+ struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(length) _len = (length); \
+ typeof(rcode) _rc = (rcode); \
+ bool is_smcd = !__smc->conn.lnk; \
+ if (is_smcd) \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
+ else \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
+} \
+while (0)
+
+#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
+do { \
+ typeof(_len) _l = (_len); \
+ typeof(_tech) t = (_tech); \
+ int _pos = fls((_l) >> 13); \
+ int m = SMC_BUF_MAX - 1; \
+ _pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
+} \
+while (0)
+
+#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
+ this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)
+
+#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
+do { \
+ struct net *_net = sock_net(&(_smc)->sk); \
+ struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(_is_smcd) is_d = (_is_smcd); \
+ typeof(_is_rx) is_r = (_is_rx); \
+ typeof(_len) l = (_len); \
+ if ((is_d) && (is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, l); \
+ if ((is_d) && !(is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, l); \
+ if (!(is_d) && (is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, l); \
+ if (!(is_d) && !(is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, l); \
+} \
+while (0)
+
+#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
+do { \
+ struct net *net = sock_net(&(_smc)->sk); \
+ struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
+ typeof(_is_smcd) is_d = (_is_smcd); \
+ typeof(_is_rx) is_r = (_is_rx); \
+ if ((is_d) && (is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
+ if ((is_d) && !(is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
+ if (!(is_d) && (is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
+ if (!(is_d) && !(is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
+} \
+while (0)
+
+#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
+ SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)
+
+#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
+ SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)
+
+#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
+ SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)
+
+#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)
+
+#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_full, is_smcd, false)
+
+#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)
+
+#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)
+
+#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)
+
+#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_full, is_smcd, true)
+
+#define SMC_STAT_INC(_smc, type) \
+do { \
+ typeof(_smc) __smc = _smc; \
+ bool is_smcd = !(__smc)->conn.lnk; \
+ struct net *net = sock_net(&(__smc)->sk); \
+ struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
+ if ((is_smcd)) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
+ else \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
+} \
+while (0)
+
+#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
+do { \
+ typeof(_aclc) acl = (_aclc); \
+ bool is_v2 = (acl->hdr.version == SMC_V2); \
+ bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
+ struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
+ if (is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
+ else if (is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
+ else if (!is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
+ else if (!is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
+} \
+while (0)
+
+#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
+do { \
+ typeof(_ini) i = (_ini); \
+ bool is_v2 = (i->smcd_version & SMC_V2); \
+ bool is_smcd = (i->is_smcd); \
+ typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
+ if (is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
+ else if (is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
+ else if (!is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
+ else if (!is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
+} \
+while (0)
+
+int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_stats_init(struct net *net);
+void smc_stats_exit(struct net *net);
+
+#endif /* NET_SMC_SMC_STATS_H_ */
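
The bucket arithmetic shared by SMC_STAT_PAYLOAD_SUB and SMC_STAT_RMB_SIZE_SUB is terse: fls64(len >> 13) picks a power-of-two bucket starting at 8K, the inner ternary pulls exact powers of two down into their own bucket (exactly 8K counts as SMC_BUF_8K, 8K+1 through 16K as SMC_BUF_16K), and anything past the top is clamped into SMC_BUF_G_1024K. A standalone model, assuming fls64() semantics of "1-based index of the highest set bit, 0 for 0", here emulated with __builtin_clzll(); the sizes are samples:

#include <stdio.h>
#include <stdint.h>

#define SMC_BUF_MAX 9

static int fls64_model(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static int size_to_bucket(uint64_t len)
{
	int m = SMC_BUF_MAX - 1;
	int pos = fls64_model(len >> 13);

	/* exact powers of two drop one bucket; large sizes clamp to m */
	return (pos < m) ? ((len == 1ULL << (pos + 12)) ? pos - 1 : pos) : m;
}

int main(void)
{
	static const char *name[] = {
		"8K", "16K", "32K", "64K", "128K",
		"256K", "512K", "1024K", ">1024K",
	};
	uint64_t sizes[] = { 8192, 12000, 65536, 200000, 5000000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8llu bytes -> bucket %s\n",
		       (unsigned long long)sizes[i],
		       name[size_to_bucket(sizes[i])]);
	return 0;
}
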
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 4532c16bf85e..289025cd545a 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -27,6 +27,7 @@
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
+#include "smc_stats.h"
#define SMC_TX_WORK_DELAY 0
#define SMC_TX_CORK_DELAY (HZ >> 2) /* 250 ms */
@@ -45,6 +46,8 @@ static void smc_tx_write_space(struct sock *sk)
/* similar to sk_stream_write_space */
if (atomic_read(&smc->conn.sndbuf_space) && sock) {
+ if (test_bit(SOCK_NOSPACE, &sock->flags))
+ SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
@@ -151,9 +154,19 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
goto out_err;
}
+ if (sk->sk_state == SMC_INIT)
+ return -ENOTCONN;
+
+ if (len > conn->sndbuf_desc->len)
+ SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);
+
+ if (len > conn->peer_rmbe_size)
+ SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);
+
+ if (msg->msg_flags & MSG_OOB)
+ SMC_STAT_INC(smc, urg_data_cnt);
+
while (msg_data_left(msg)) {
- if (sk->sk_state == SMC_INIT)
- return -ENOTCONN;
if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
(smc->sk.sk_err == ECONNABORTED) ||
conn->killed)
@@ -419,8 +432,12 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
/* destination: RMBE */
/* cf. snd_wnd */
rmbespace = atomic_read(&conn->peer_rmbe_space);
- if (rmbespace <= 0)
+ if (rmbespace <= 0) {
+ struct smc_sock *smc = container_of(conn, struct smc_sock,
+ conn);
+ SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
return 0;
+ }
smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
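
One detail in the smc_tx_rdma_writes() hunk above: the stats macros want the smc_sock, but the function only has the embedded smc_connection, hence the container_of() step. A minimal model of that recovery, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct smc_connection { int peer_rmbe_space; };
struct smc_sock { int dummy; struct smc_connection conn; };

int main(void)
{
	struct smc_sock smc = { .conn.peer_rmbe_space = 0 };
	struct smc_connection *conn = &smc.conn;
	/* subtracts offsetof(struct smc_sock, conn) from conn */
	struct smc_sock *back = container_of(conn, struct smc_sock, conn);

	printf("recovered: %p == %p\n", (void *)back, (void *)&smc);
	return 0;
}
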