author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-05 14:05:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-05 14:05:57 -0700
commit     242b23319809e05170b3cc0d44d3b4bd202bb073 (patch)
tree       195e39fd02942ee0ef60ead7239859f2fe0c12a1 /drivers/infiniband/ulp
parent     Merge tag 'gpio-v5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio (diff)
parent     RDMA/cm: Spurious WARNING triggered in cm_destroy_id() (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "A more active cycle than most of the recent past, with a few large,
  long discussed works this time.

  The RNBD block driver has been posted for nearly two years now, and
  flowing through RDMA due to it also introducing a new ULP.

  The removal of FMR has been a recurring discussion theme for a long
  time.

  And the usual smattering of features and bug fixes.

  Summary:

   - Various small driver bug fixes in rxe, mlx5, hfi1, and efa

   - Continuing driver cleanups in bnxt_re, hns

   - Big cleanup of mlx5 QP creation flows

   - More consistent use of src port and flow label when LAG is used
     and a mlx5 implementation

   - Additional set of cleanups for IB CM

   - 'RNBD' network block driver and target. This is a network block
     RDMA device specific to ionos's cloud environment. It brings strong
     multipath and resiliency capabilities.

   - Accelerated IPoIB for HFI1

   - QP/WQ/SRQ ioctl migration for uverbs, and support for multiple
     async fds

   - Support for exchanging the new IBTA defined ECE data during RDMA
     CM exchanges

   - Removal of the very old and insecure FMR interface from all ULPs
     and drivers. FRWR should be preferred for at least a decade now"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (247 commits)
  RDMA/cm: Spurious WARNING triggered in cm_destroy_id()
  RDMA/mlx5: Return ECE DC support
  RDMA/mlx5: Don't rely on FW to set zeros in ECE response
  RDMA/mlx5: Return an error if copy_to_user fails
  IB/hfi1: Use free_netdev() in hfi1_netdev_free()
  RDMA/hns: Uninitialized variable in modify_qp_init_to_rtr()
  RDMA/core: Move and rename trace_cm_id_create()
  IB/hfi1: Fix hfi1_netdev_rx_init() error handling
  RDMA: Remove 'max_map_per_fmr'
  RDMA: Remove 'max_fmr'
  RDMA/core: Remove FMR device ops
  RDMA/rdmavt: Remove FMR memory registration
  RDMA/mthca: Remove FMR support for memory registration
  RDMA/mlx4: Remove FMR support for memory registration
  RDMA/i40iw: Remove FMR leftovers
  RDMA/bnxt_re: Remove FMR leftovers
  RDMA/mlx5: Remove FMR leftovers
  RDMA/core: Remove FMR pool API
  RDMA/rds: Remove FMR support for memory registration
  RDMA/srp: Remove support for FMR memory registration
  ...
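With FMR gone after this merge, ULPs register memory through fast registration work requests (FRWR), which is what the iSER and SRP diffs below switch to. The following is a minimal sketch of that pattern against the in-kernel verbs API; it is not code from this series, and example_setup_mr(), pd, qp, sg and sg_nents are placeholder names for illustration.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/*
 * Minimal FRWR sketch: allocate an MR, map an S/G list into it and
 * post an IB_WR_REG_MR work request on the connection's QP.
 */
static int example_setup_mr(struct ib_pd *pd, struct ib_qp *qp,
                            struct scatterlist *sg, int sg_nents)
{
        struct ib_reg_wr reg_wr = {};
        struct ib_mr *mr;
        int n;

        mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
        if (IS_ERR(mr))
                return PTR_ERR(mr);

        /* Map the S/G list into the MR; on success n == sg_nents */
        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
        if (n != sg_nents) {
                ib_dereg_mr(mr);
                return n < 0 ? n : -EINVAL;
        }

        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.mr = mr;
        reg_wr.key = mr->rkey;
        reg_wr.access = IB_ACCESS_LOCAL_WRITE |
                        IB_ACCESS_REMOTE_READ |
                        IB_ACCESS_REMOTE_WRITE;

        /* The registration completes on the send queue before any data WRs */
        return ib_post_send(qp, &reg_wr.wr, NULL);
}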
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/Makefile                  |    1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c        |   37
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c   |   23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c       |    3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c        |    3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h         |   79
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c     |   19
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c        |  188
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c         |  126
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c          |    5
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c  |   12
-rw-r--r--  drivers/infiniband/ulp/rtrs/Kconfig              |   27
-rw-r--r--  drivers/infiniband/ulp/rtrs/Makefile             |   15
-rw-r--r--  drivers/infiniband/ulp/rtrs/README               |  213
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c     |  200
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c     |  483
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c           | 2992
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.h           |  252
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-log.h           |   28
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-pri.h           |  399
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c     |   38
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c     |  321
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.c           | 2178
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.h           |  148
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.c               |  612
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.h               |  196
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c              |  265
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h              |   27
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c            |   67
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h            |    5
30 files changed, 8265 insertions, 697 deletions
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
index 437813c7b481..4d0004b58377 100644
--- a/drivers/infiniband/ulp/Makefile
+++ b/drivers/infiniband/ulp/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
obj-$(CONFIG_INFINIBAND_ISER) += iser/
obj-$(CONFIG_INFINIBAND_ISERT) += isert/
obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ceec24d45185..3cfb682b91b0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -86,7 +86,7 @@ struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
-static void ipoib_add_one(struct ib_device *device);
+static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
@@ -479,9 +479,6 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- if (!dev_list)
- return NULL;
-
/* See if we can find a unique device matching the L2 parameters */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -529,6 +526,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
"will cause multicast packet drops\n");
netdev_update_features(dev);
dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -540,6 +538,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
netdev_update_features(dev);
dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1860,7 +1859,7 @@ static int ipoib_parent_init(struct net_device *ndev)
priv->port);
return result;
}
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
if (result) {
@@ -1901,6 +1900,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
if (priv->parent) {
ipoib_child_init(ndev);
@@ -1913,6 +1913,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
/* MTU will be reset when mcast join happens */
ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
ndev->max_mtu = IPOIB_CM_MTU;
ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
@@ -2074,9 +2075,17 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_do_ioctl = ipoib_ioctl,
};
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
+
void ipoib_setup_common(struct net_device *dev)
{
dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
ipoib_set_ethtool_ops(dev);
@@ -2126,13 +2135,6 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-static const struct net_device_ops ipoib_netdev_default_pf = {
- .ndo_init = ipoib_dev_init_default,
- .ndo_uninit = ipoib_dev_uninit_default,
- .ndo_open = ipoib_ib_dev_open_default,
- .ndo_stop = ipoib_ib_dev_stop_default,
-};
-
static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
const char *name)
{
@@ -2170,7 +2172,6 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
if (rc != -EOPNOTSUPP)
goto out;
- dev->netdev_ops = &ipoib_netdev_default_pf;
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
@@ -2516,7 +2517,7 @@ sysfs_failed:
return ERR_PTR(-ENOMEM);
}
-static void ipoib_add_one(struct ib_device *device)
+static int ipoib_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct net_device *dev;
@@ -2526,7 +2527,7 @@ static void ipoib_add_one(struct ib_device *device)
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
- return;
+ return -ENOMEM;
INIT_LIST_HEAD(dev_list);
@@ -2543,10 +2544,11 @@ static void ipoib_add_one(struct ib_device *device)
if (!count) {
kfree(dev_list);
- return;
+ return -EOPNOTSUPP;
}
ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
@@ -2554,9 +2556,6 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
- if (!dev_list)
- return;
-
list_for_each_entry_safe(priv, tmp, dev_list, list) {
LIST_HEAD(head);
ipoib_parent_unregister_pre(priv->dev);
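The ipoib_add_one() change in this file follows the ib_client interface rework in this cycle: a client's add() callback now returns an errno instead of void, so remove() no longer needs the NULL client-data check deleted above. A minimal sketch of the resulting registration pattern is shown below; the example_* names are placeholders, not kernel code.

#include <linux/list.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static struct ib_client example_client;

/* add() now reports failure; the core treats a nonzero return as
 * "this device is not claimed", so remove() is only called after success. */
static int example_add_one(struct ib_device *device)
{
        struct list_head *dev_list;

        dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
        if (!dev_list)
                return -ENOMEM;
        INIT_LIST_HEAD(dev_list);

        ib_set_client_data(device, &example_client, dev_list);
        return 0;
}

static void example_remove_one(struct ib_device *device, void *client_data)
{
        /* client_data is never NULL here because add() returned 0 */
        kfree(client_data);
}

static struct ib_client example_client = {
        .name   = "example",
        .add    = example_add_one,
        .remove = example_remove_one,
};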
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b9e9562f5034..9bfa514473d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,11 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
kfree(mcast);
}
-static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
- int can_sleep)
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof(*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -218,6 +217,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
struct rdma_ah_attr av;
int ret;
int set_qkey = 0;
+ int mtu;
mcast->mcmember = *mcmember;
@@ -240,13 +240,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
priv->broadcast->mcmember.flow_label = mcmember->flow_label;
priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
/* assume if the admin and the mcast are the same both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
if (priv->mcast_mtu == priv->admin_mtu)
- priv->admin_mtu =
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
- else
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
@@ -599,7 +598,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- broadcast = ipoib_mcast_alloc(dev, 0);
+ broadcast = ipoib_mcast_alloc(dev);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
/*
@@ -782,7 +781,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
mgid);
- mcast = ipoib_mcast_alloc(dev, 0);
+ mcast = ipoib_mcast_alloc(dev);
if (!mcast) {
ipoib_warn(priv, "unable to allocate memory "
"for multicast structure\n");
@@ -936,7 +935,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
mgid.raw);
- nmcast = ipoib_mcast_alloc(dev, 0);
+ nmcast = ipoib_mcast_alloc(dev);
if (!nmcast) {
ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
continue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b69304d28f06..587252fd6f57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,6 +206,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+ if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
pr_warn("%s: failed to create QP\n", ca->name);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 8ac8e18fbe0c..30865605e098 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -97,6 +97,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
{
struct net_device *ndev = priv->dev;
int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
ASSERT_RTNL();
@@ -117,6 +118,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto out_early;
}
+ rn->mtu = priv->mcast_mtu;
+
priv->parent = ppriv->dev;
priv->pkey = pkey;
priv->child_type = type;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 029c00163442..1d77c7f42e38 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -65,7 +65,6 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
@@ -313,33 +312,6 @@ struct iser_comp {
};
/**
- * struct iser_reg_ops - Memory registration operations
- * per-device registration schemes
- *
- * @alloc_reg_res: Allocate registration resources
- * @free_reg_res: Free registration resources
- * @reg_mem: Register memory buffers
- * @unreg_mem: Un-register memory buffers
- * @reg_desc_get: Get a registration descriptor for pool
- * @reg_desc_put: Get a registration descriptor to pool
- */
-struct iser_reg_ops {
- int (*alloc_reg_res)(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
- void (*free_reg_res)(struct ib_conn *ib_conn);
- int (*reg_mem)(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg);
- void (*unreg_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
- struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
- void (*reg_desc_put)(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-};
-
-/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
@@ -351,8 +323,6 @@ struct iser_reg_ops {
* @comps_used: Number of completion contexts used, Min between online
* cpus and device max completion vectors
* @comps: Dinamically allocated array of completion handlers
- * @reg_ops: Registration ops
- * @remote_inv_sup: Remote invalidate is supported on this device
*/
struct iser_device {
struct ib_device *ib_device;
@@ -362,26 +332,18 @@ struct iser_device {
int refcount;
int comps_used;
struct iser_comp *comps;
- const struct iser_reg_ops *reg_ops;
- bool remote_inv_sup;
};
/**
* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @fmr_pool: pool of fmrs
* @sig_mr: signature memory region
- * @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
- union {
- struct ib_mr *mr;
- struct ib_fmr_pool *fmr_pool;
- };
+ struct ib_mr *mr;
struct ib_mr *sig_mr;
- struct iser_page_vec *page_vec;
u8 mr_valid:1;
};
@@ -403,7 +365,7 @@ struct iser_fr_desc {
* struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
- * @lock: protects fmr/fastreg pool
+ * @lock: protects fastreg pool
* @size: size of the pool
*/
struct iser_fr_pool {
@@ -518,12 +480,6 @@ struct iscsi_iser_task {
struct iser_data_buf prot[ISER_DIRS_NUM];
};
-struct iser_page_vec {
- u64 *pages;
- int npages;
- struct ib_mr fake_mr;
-};
-
/**
* struct iser_global - iSER global context
*
@@ -548,8 +504,6 @@ extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
-int iser_assign_reg_ops(struct iser_device *device);
-
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
@@ -591,22 +545,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm);
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir);
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-
int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
@@ -625,26 +574,12 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
-void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
-struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 4a7045bb0831..27a6f75a9912 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -72,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -126,8 +126,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
- buf_out->data_len == imm_sz);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+ buf_out->data_len == imm_sz);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -250,8 +250,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
- if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->pages_per_mr))
+ if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
@@ -293,7 +293,7 @@ rx_desc_dma_map_failed:
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
@@ -306,8 +306,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- if (device->reg_ops->free_reg_res)
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -768,7 +767,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
int prot_count = scsi_prot_sg_count(iser_task->sc);
if (iser_task->dir[ISER_DIR_IN]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN],
DMA_FROM_DEVICE);
@@ -779,7 +778,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
}
if (iser_task->dir[ISER_DIR_OUT]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT],
DMA_TO_DEVICE);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 999ef7cdd05e..d4e057fac219 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,62 +38,13 @@
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-
-static const struct iser_reg_ops fastreg_ops = {
- .alloc_reg_res = iser_alloc_fastreg_pool,
- .free_reg_res = iser_free_fastreg_pool,
- .reg_mem = iser_fast_reg_mr,
- .unreg_mem = iser_unreg_mem_fastreg,
- .reg_desc_get = iser_reg_desc_get_fr,
- .reg_desc_put = iser_reg_desc_put_fr,
-};
-
-static const struct iser_reg_ops fmr_ops = {
- .alloc_reg_res = iser_alloc_fmr_pool,
- .free_reg_res = iser_free_fmr_pool,
- .reg_mem = iser_fast_reg_fmr,
- .unreg_mem = iser_unreg_mem_fmr,
- .reg_desc_get = iser_reg_desc_get_fmr,
- .reg_desc_put = iser_reg_desc_put_fmr,
-};
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
iser_err_comp(wc, "memreg");
}
-int iser_assign_reg_ops(struct iser_device *device)
-{
- struct ib_device *ib_dev = device->ib_device;
-
- /* Assign function handles - based on FMR support */
- if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
- ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
- iser_info("FMR supported, using FMR for registration\n");
- device->reg_ops = &fmr_ops;
- } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FastReg supported, using FastReg for registration\n");
- device->reg_ops = &fastreg_ops;
- device->remote_inv_sup = iser_always_reg;
- } else {
- iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
- return -1;
- }
-
- return 0;
-}
-
-struct iser_fr_desc *
+static struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
@@ -109,7 +60,7 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-void
+static void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc)
{
@@ -121,44 +72,6 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
- return list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
-{
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
- struct ib_device *ibdev)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(data->sg, sg, data->dma_nents, i)
- iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
- "off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)sg_dma_address(sg),
- sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
- int i;
-
- iser_err("page vec npages %d data length %lld\n",
- page_vec->npages, page_vec->fake_mr.length);
- for (i = 0; i < page_vec->npages; i++)
- iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
@@ -213,84 +126,9 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
return 0;
}
-static int iser_set_page(struct ib_mr *mr, u64 addr)
-{
- struct iser_page_vec *page_vec =
- container_of(mr, struct iser_page_vec, fake_mr);
-
- page_vec->pages[page_vec->npages++] = addr;
-
- return 0;
-}
-
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg)
-{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct iser_page_vec *page_vec = rsc->page_vec;
- struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
- struct ib_pool_fmr *fmr;
- int ret, plen;
-
- page_vec->npages = 0;
- page_vec->fake_mr.page_size = SZ_4K;
- plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->dma_nents, NULL, iser_set_page);
- if (unlikely(plen < mem->dma_nents)) {
- iser_err("page vec too short to hold this SG\n");
- iser_data_buf_dump(mem, device->ib_device);
- iser_dump_page_vec(page_vec);
- return -EINVAL;
- }
-
- fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
- page_vec->npages, page_vec->pages[0]);
- if (IS_ERR(fmr)) {
- ret = PTR_ERR(fmr);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
- return ret;
- }
-
- reg->sge.lkey = fmr->fmr->lkey;
- reg->rkey = fmr->fmr->rkey;
- reg->sge.addr = page_vec->fake_mr.iova;
- reg->sge.length = page_vec->fake_mr.length;
- reg->mem_h = fmr;
-
- iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
- " length=0x%x\n", reg->sge.lkey, reg->rkey,
- reg->sge.addr, reg->sge.length);
-
- return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
- ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-
- reg->mem_h = NULL;
-}
-
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct iser_fr_desc *desc;
struct ib_mr_status mr_status;
@@ -312,7 +150,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
&mr_status);
}
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc);
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
reg->mem_h = NULL;
}
@@ -509,15 +347,14 @@ iser_reg_data_sg(struct iscsi_iser_task *task,
if (use_dma_key)
return iser_reg_dma(device, mem, reg);
- return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+ return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
}
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_fr_desc *desc = NULL;
@@ -528,7 +365,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
if (!use_dma_key) {
- desc = device->reg_ops->reg_desc_get(ib_conn);
+ desc = iser_reg_desc_get_fr(ib_conn);
reg->mem_h = desc;
}
@@ -549,15 +386,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
err_reg:
if (desc)
- device->reg_ops->reg_desc_put(ib_conn, desc);
+ iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- device->reg_ops->unreg_mem(task, dir);
-}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 127887c6c03f..c1f44c41f501 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -68,11 +68,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- int ret, i, max_cqe;
+ int i, max_cqe;
- ret = iser_assign_reg_ops(device);
- if (ret)
- return ret;
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ iser_err("IB device does not support memory registrations\n");
+ return -1;
+ }
device->comps_used = min_t(int, num_online_cpus(),
ib_dev->num_comp_vectors);
@@ -147,96 +148,6 @@ static void iser_free_device_ib_res(struct iser_device *device)
device->pd = NULL;
}
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- * @ib_conn: connection RDMA resources
- * @cmds_max: max number of SCSI commands for this connection
- * @size: max number of pages per map request
- *
- * Return: 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size)
-{
- struct iser_device *device = ib_conn->device;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_page_vec *page_vec;
- struct iser_fr_desc *desc;
- struct ib_fmr_pool *fmr_pool;
- struct ib_fmr_pool_param params;
- int ret;
-
- INIT_LIST_HEAD(&fr_pool->list);
- spin_lock_init(&fr_pool->lock);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
- GFP_KERNEL);
- if (!page_vec) {
- ret = -ENOMEM;
- goto err_frpl;
- }
-
- page_vec->pages = (u64 *)(page_vec + 1);
-
- params.page_shift = ilog2(SZ_4K);
- params.max_pages_per_fmr = size;
- /* make the pool size twice the max number of SCSI commands *
- * the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = cmds_max * 2;
- params.dirty_watermark = cmds_max;
- params.cache = 0;
- params.flush_function = NULL;
- params.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- iser_err("FMR allocation failed, err %d\n", ret);
- goto err_fmr;
- }
-
- desc->rsc.page_vec = page_vec;
- desc->rsc.fmr_pool = fmr_pool;
- list_add(&desc->list, &fr_pool->list);
-
- return 0;
-
-err_fmr:
- kfree(page_vec);
-err_frpl:
- kfree(desc);
-
- return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- * @ib_conn: connection RDMA resources
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_fr_desc *desc;
-
- desc = list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
- list_del(&desc->list);
-
- iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, desc->rsc.fmr_pool);
-
- ib_destroy_fmr_pool(desc->rsc.fmr_pool);
- kfree(desc->rsc.page_vec);
- kfree(desc);
-}
-
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
@@ -667,13 +578,12 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
u32 max_num_sg;
/*
- * FRs without SG_GAPS or FMRs can only map up to a (device) page per
- * entry, but if the first entry is misaligned we'll end up using two
- * entries (head and tail) for a single page worth data, so one
- * additional entry is required.
+ * FRs without SG_GAPS can only map up to a (device) page per entry,
+ * but if the first entry is misaligned we'll end up using two entries
+ * (head and tail) for a single page worth data, so one additional
+ * entry is required.
*/
- if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
- (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
reserved_mr_pages = 0;
else
reserved_mr_pages = 1;
@@ -684,14 +594,8 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
max_num_sg = attr->max_fast_reg_page_list_len;
sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
- if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
- sup_sg_tablesize =
- min_t(
- uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- max_num_sg - reserved_mr_pages);
- else
- sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
-
+ sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ max_num_sg - reserved_mr_pages);
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
iser_conn->pages_per_mr =
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
@@ -755,7 +659,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
struct iser_cm_hdr req_hdr;
struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
+ struct ib_device *ib_dev = ib_conn->device->ib_device;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
@@ -766,14 +670,14 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
- conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+ conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
- if (!device->remote_inv_sup)
+ if (!iser_always_reg)
req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b7df38ee8ae0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -502,7 +503,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
+ return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}
spin_unlock_bh(&np->np_thread_lock);
@@ -553,7 +554,7 @@ out_rsp_dma_map:
isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
- rdma_reject(cma_id, NULL, 0);
+ rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 6e8d650c17c7..874a8eb7638c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -113,7 +113,7 @@ struct opa_vnic_vema_port {
struct mutex lock;
};
-static void opa_vnic_vema_add_one(struct ib_device *device);
+static int opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
void *client_data);
@@ -989,18 +989,18 @@ static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
*
* Allocate the vnic control port and initialize it.
*/
-static void opa_vnic_vema_add_one(struct ib_device *device)
+static int opa_vnic_vema_add_one(struct ib_device *device)
{
struct opa_vnic_ctrl_port *cport;
int rc, size = sizeof(*cport);
if (!rdma_cap_opa_vnic(device))
- return;
+ return -EOPNOTSUPP;
size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
cport = kzalloc(size, GFP_KERNEL);
if (!cport)
- return;
+ return -ENOMEM;
cport->num_ports = device->phys_port_cnt;
cport->ibdev = device;
@@ -1012,6 +1012,7 @@ static void opa_vnic_vema_add_one(struct ib_device *device)
ib_set_client_data(device, &opa_vnic_client, cport);
opa_vnic_ctrl_config_dev(cport, true);
+ return 0;
}
/**
@@ -1026,9 +1027,6 @@ static void opa_vnic_vema_rem_one(struct ib_device *device,
{
struct opa_vnic_ctrl_port *cport = client_data;
- if (!cport)
- return;
-
c_info("removing VNIC client\n");
opa_vnic_ctrl_config_dev(cport, false);
vema_unregister(cport);
diff --git a/drivers/infiniband/ulp/rtrs/Kconfig b/drivers/infiniband/ulp/rtrs/Kconfig
new file mode 100644
index 000000000000..9092b62e6dc8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INFINIBAND_RTRS
+ tristate
+ depends on INFINIBAND_ADDR_TRANS
+
+config INFINIBAND_RTRS_CLIENT
+ tristate "RTRS client module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport client module.
+
+ RDMA Transport (RTRS) client implements a reliable transport layer
+ with multipathing functionality; it is intended to be the base
+ layer for a block storage initiator over RDMA.
+
+config INFINIBAND_RTRS_SERVER
+ tristate "RTRS server module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport server module.
+
+ RDMA Transport (RTRS) server module processes connection and IO
+ requests received from the RTRS client module and passes the IO
+ requests on to its user, e.g. RNBD_server.
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
new file mode 100644
index 000000000000..3898509be270
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+rtrs-client-y := rtrs-clt.o \
+ rtrs-clt-stats.o \
+ rtrs-clt-sysfs.o
+
+rtrs-server-y := rtrs-srv.o \
+ rtrs-srv-stats.o \
+ rtrs-srv-sysfs.o
+
+rtrs-core-y := rtrs.o
+
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs-core.o
+obj-$(CONFIG_INFINIBAND_RTRS_CLIENT) += rtrs-client.o
+obj-$(CONFIG_INFINIBAND_RTRS_SERVER) += rtrs-server.o
diff --git a/drivers/infiniband/ulp/rtrs/README b/drivers/infiniband/ulp/rtrs/README
new file mode 100644
index 000000000000..5d9ea142e5dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/README
@@ -0,0 +1,213 @@
+****************************
+RDMA Transport (RTRS)
+****************************
+
+RTRS (RDMA Transport) is a reliable high speed transport library
+which establishes an optimal number of connections between client and
+server machines using RDMA (InfiniBand, RoCE, iWarp) transport. It is
+optimized to transfer (read/write) IO blocks.
+
+In its core interface it follows the BIO semantics of providing the
+possibility to either write data from an sg list to the remote side
+or to request ("read") data transfer from the remote side into a given
+sg list.
+
+RTRS provides I/O fail-over and load-balancing capabilities by using
+multipath I/O (see "add_path" and "mp_policy" configuration entries in
+Documentation/ABI/testing/sysfs-class-rtrs-client).
+
+RTRS is used by the RNBD (RDMA Network Block Device) modules.
+
+==================
+Transport protocol
+==================
+
+Overview
+--------
+An established connection between a client and a server is called rtrs
+session. A session is associated with a set of memory chunks reserved on the
+server side for a given client for rdma transfer. A session
+consists of multiple paths, each representing a separate physical link
+between client and server. Those are used for load balancing and failover.
+Each path consists of as many connections (QPs) as there are cpus on
+the client.
+
+When processing an incoming write or read request, the rtrs client uses
+memory chunks reserved for it on the server side. Their number, size and
+addresses need to be exchanged between client and server during the
+connection establishment phase. Apart from the memory-related information,
+the client needs to inform the server about the session name and identify
+each path and connection individually.
+
+On an established session the client sends write or read messages to the
+server. The server uses the immediate field to tell the client which request
+is being acknowledged and to carry the errno. The client uses the immediate
+field to tell the server which of the memory chunks has been accessed and at
+which offset the message can be found.
+
+Module parameter always_invalidate is introduced for the security problem
+discussed in LPC RDMA MC 2019. When always_invalidate=Y, on the server side we
+invalidate each rdma buffer before we hand it over to RNBD server and
+then pass it to the block layer. A new rkey is generated and registered for the
+buffer after it returns back from the block layer and RNBD server.
+The new rkey is sent back to the client along with the IO result.
+This procedure is the default behaviour of the driver. The invalidation and
+registration on each IO causes a performance drop of up to 20%. A user of the
+driver may choose to load the modules with this mechanism switched off
+(always_invalidate=N), if they understand and can take the risk of a malicious
+client being able to corrupt memory of a server it is connected to. This might
+be a reasonable option in a scenario where all the clients and all the servers
+are located within a secure datacenter.
+
+
+Connection establishment
+------------------------
+
+1. Client starts establishing connections belonging to a path of a session one
+by one via attaching RTRS_MSG_CON_REQ messages to the rdma_connect requests.
+Those include uuid of the session and uuid of the path to be
+established. They are used by the server to find a persisting session/path or
+to create a new one when necessary. The message also contains the protocol
+version and magic for compatibility, total number of connections per session
+(as many as cpus on the client), the id of the current connection and
+the reconnect counter, which is used to resolve the situations where
+client is trying to reconnect a path, while server is still destroying the old
+one.
+
+2. Server accepts the connection requests one by one and attaches
+RTRS_MSG_CONN_RSP messages to the rdma_accept. Apart from magic and
+protocol version, the messages include error code, queue depth supported by
+the server (number of memory chunks which are going to be allocated for that
+session) and the maximum size of one IO. The RTRS_MSG_NEW_RKEY_F flag is set
+when always_invalidate=Y.
+
+3. After all connections of a path are established client sends to server the
+RTRS_MSG_INFO_REQ message, containing the name of the session. This message
+requests the address information from the server.
+
+4. Server replies to the session info request message with RTRS_MSG_INFO_RSP,
+which contains the addresses and keys of the RDMA buffers allocated for that
+session.
+
+5. Session becomes connected after all paths to be established are connected
+(i.e. steps 1-4 finished for all paths requested for a session)
+
+6. Server and client exchange periodically heartbeat messages (empty rdma
+messages with an immediate field) which are used to detect a crash on remote
+side or network outage in an absence of IO.
+
+7. On any RDMA related error or in the case of a heartbeat timeout, the
+corresponding path is disconnected, all the inflight IO are failed over to a
+healthy path, if any, and the reconnect mechanism is triggered.
+
+CLT SRV
+*for each connection belonging to a path and for each path:
+RTRS_MSG_CON_REQ ------------------->
+ <------------------- RTRS_MSG_CON_RSP
+...
+*after all connections are established:
+RTRS_MSG_INFO_REQ ------------------->
+ <------------------- RTRS_MSG_INFO_RSP
+*heartbeat is started from both sides:
+ -------------------> [RTRS_HB_MSG_IMM]
+[RTRS_HB_MSG_ACK] <-------------------
+[RTRS_HB_MSG_IMM] <-------------------
+ -------------------> [RTRS_HB_MSG_ACK]
+
+IO path
+-------
+
+* Write (always_invalidate=N) *
+
+1. When processing a write request client selects one of the memory chunks
+on the server side and rdma writes there the user data, user header and the
+RTRS_MSG_RDMA_WRITE message. Apart from the type (write), the message only
+contains size of the user header. The client tells the server which chunk has
+been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be found by
+using the IMM field.
+
+2. When confirming a write request server sends an "empty" rdma message with
+an immediate field. The 32 bit field is used to specify the outstanding
+inflight IO and for the error code.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+* Write (always_invalidate=Y) *
+
+1. When processing a write request client selects one of the memory chunks
+on the server side and rdma writes there the user data, user header and the
+RTRS_MSG_RDMA_WRITE message. Apart from the type (write), the message only
+contains size of the user header. The client tells the server which chunk has
+been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be found by
+using the IMM field. The server first invalidates the rkey associated with
+the memory chunk and, when it finishes, passes the IO to the RNBD server module.
+
+2. When confirming a write request server sends an "empty" rdma message with
+an immediate field. The 32 bit field is used to specify the outstanding
+inflight IO and for the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer and completes the IO, then posts
+the recv buffer back for later use.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+
+* Read (always_invalidate=N)*
+
+1. When processing a read request client selects one of the memory chunks
+on the server side and rdma writes there the user header and the
+RTRS_MSG_RDMA_READ message. This message contains the type (read), size of
+the user header, flags (specifying if memory invalidation is necessary) and the
+list of addresses along with keys for the data to be read into.
+
+2. When confirming a read request server transfers the requested data first,
+attaches an invalidation message if requested and finally an "empty" rdma
+message with an immediate field. The 32 bit field is used to specify the
+outstanding inflight IO and the error code.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+* Read (always_invalidate=Y)*
+
+1. When processing a read request client selects one of the memory chunks
+on the server side and rdma writes there the user header and the
+RTRS_MSG_RDMA_READ message. This message contains the type (read), size of
+the user header, flags (specifying if memory invalidation is necessary) and the
+list of addresses along with keys for the data to be read into.
+The server first invalidates the rkey associated with the memory chunks and,
+when it finishes, passes the IO to the RNBD server module.
+
+2. When confirming a read request server transfers the requested data first,
+attaches an invalidation message if requested and finally an "empty" rdma
+message with an immediate field. The 32 bit field is used to specify the
+outstanding inflight IO and the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer and completes the IO, then posts
+the recv buffer back for later use.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+=========================================
+Contributors List(in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
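The README above leans on the 32-bit immediate field to carry a chunk id and offset on the request path (and an IO id plus errno on the response path). The helpers below are purely illustrative of that packing idea, not the encoding rtrs actually uses; the 16/16 bit split and the ex_* names are assumptions made for this example.

#include <linux/types.h>

/*
 * Hypothetical packing of an RTRS-style request immediate:
 * high bits carry the chunk id, low bits the byte offset into the chunk.
 * The 16/16 split below is an assumption for illustration only.
 */
#define EX_IMM_OFFSET_BITS      16
#define EX_IMM_OFFSET_MASK      ((1u << EX_IMM_OFFSET_BITS) - 1)

static inline u32 ex_chunk_to_imm(u32 chunk_id, u32 offset)
{
        return (chunk_id << EX_IMM_OFFSET_BITS) | (offset & EX_IMM_OFFSET_MASK);
}

static inline void ex_imm_to_chunk(u32 imm, u32 *chunk_id, u32 *offset)
{
        *chunk_id = imm >> EX_IMM_OFFSET_BITS;
        *offset = imm & EX_IMM_OFFSET_MASK;
}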
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
new file mode 100644
index 000000000000..26bbe5d6dff5
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-clt.h"
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+ s = this_cpu_ptr(stats->pcpu_stats);
+ if (unlikely(con->cpu != cpu)) {
+ s->cpu_migr.to++;
+
+ /* Careful here, override s pointer */
+ s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
+ atomic_inc(&s->cpu_migr.from);
+ }
+}
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.failover_cnt++;
+}
+
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
+ char *buf, size_t len)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ size_t used;
+ int cpu;
+
+ used = scnprintf(buf, len, " ");
+ for_each_possible_cpu(cpu)
+ used += scnprintf(buf + used, len - used, " CPU%u", cpu);
+
+ used += scnprintf(buf + used, len - used, "\nfrom:");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ atomic_read(&s->cpu_migr.from));
+ }
+
+ used += scnprintf(buf + used, len - used, "\nto :");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ s->cpu_migr.to);
+ }
+ used += scnprintf(buf + used, len - used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len)
+{
+ return scnprintf(buf, len, "%d %d\n",
+ stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
+}
+
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_clt_stats_rdma sum;
+ struct rtrs_clt_stats_rdma *r;
+ int cpu;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ sum.failover_cnt += r->failover_cnt;
+ }
+
+ return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
+ atomic_read(&stats->inflight), sum.failover_cnt);
+}
+
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
+ char *page, size_t len)
+{
+ return scnprintf(page, len, "echo 1 to reset all statistics\n");
+}
+
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->rdma, 0, sizeof(s->rdma));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
+{
+ if (!enable)
+ return -EINVAL;
+
+ memset(&stats->reconnects, 0, sizeof(stats->reconnects));
+
+ return 0;
+}
+
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
+{
+ if (enable) {
+ rtrs_clt_reset_rdma_stats(s, enable);
+ rtrs_clt_reset_cpu_migr_stats(s, enable);
+ rtrs_clt_reset_reconnects_stat(s, enable);
+ atomic_set(&s->inflight, 0);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
+ size_t size, int d)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.dir[d].cnt++;
+ s->rdma.dir[d].size_total += size;
+}
+
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ unsigned int len;
+
+ len = req->usr_len + req->data_len;
+ rtrs_clt_update_rdma_stats(stats, len, dir);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_inc(&stats->inflight);
+}
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
+{
+ stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
+ if (!stats->pcpu_stats)
+ return -ENOMEM;
+
+ /*
+ * successful_cnt will be set to 0 after session
+ * is established for the first time
+ */
+ stats->reconnects.successful_cnt = -1;
+
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
new file mode 100644
index 000000000000..298b747d0330
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define MIN_MAX_RECONN_ATT -1
+#define MAX_MAX_RECONN_ATT 9999
+
+static void rtrs_clt_sess_release(struct kobject *kobj)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ free_sess(sess);
+}
+
+static struct kobj_type ktype_sess = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_release
+};
+
+static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_clt_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
+
+ free_percpu(stats->pcpu_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_stats_release,
+};
+
+static ssize_t max_reconnect_attempts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ return sprintf(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt));
+}
+
+static ssize_t max_reconnect_attempts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int value;
+ int ret;
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret) {
+ rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
+ attr->attr.name, buf);
+ return ret;
+ }
+ if (value > MAX_MAX_RECONN_ATT ||
+ value < MIN_MAX_RECONN_ATT) {
+ rtrs_err(clt,
+ "%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
+ attr->attr.name, buf, MIN_MAX_RECONN_ATT,
+ MAX_MAX_RECONN_ATT);
+ return -EINVAL;
+ }
+ rtrs_clt_set_max_reconnect_attempts(clt, value);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_reconnect_attempts);
+
+static ssize_t mpath_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ switch (clt->mp_policy) {
+ case MP_POLICY_RR:
+ return sprintf(page, "round-robin (RR: %d)\n", clt->mp_policy);
+ case MP_POLICY_MIN_INFLIGHT:
+ return sprintf(page, "min-inflight (MI: %d)\n", clt->mp_policy);
+ default:
+ return sprintf(page, "Unknown (%d)\n", clt->mp_policy);
+ }
+}
+
+static ssize_t mpath_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rtrs_clt *clt;
+ int value;
+ int ret;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (!ret && (value == MP_POLICY_RR ||
+ value == MP_POLICY_MIN_INFLIGHT)) {
+ clt->mp_policy = value;
+ return count;
+ }
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+ !strncasecmp(buf, "rr", 2))
+ clt->mp_policy = MP_POLICY_RR;
+ else if (!strncasecmp(buf, "min-inflight", 12) ||
+ !strncasecmp(buf, "mi", 2))
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mpath_policy);
+
+static ssize_t add_path_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static ssize_t add_path_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sockaddr_storage srcaddr, dstaddr;
+ struct rtrs_addr addr = {
+ .src = &srcaddr,
+ .dst = &dstaddr
+ };
+ struct rtrs_clt *clt;
+ const char *nl;
+ size_t len;
+ int err;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ nl = strchr(buf, '\n');
+ if (nl)
+ len = nl - buf;
+ else
+ len = count;
+ err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
+ if (err)
+ return -EINVAL;
+
+ err = rtrs_clt_create_path_from_sysfs(clt, &addr);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(add_path);
+
+static ssize_t rtrs_clt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (sess->state == RTRS_CLT_CONNECTED)
+ return sprintf(page, "connected\n");
+
+ return sprintf(page, "disconnected\n");
+}
+
+static struct kobj_attribute rtrs_clt_state_attr =
+ __ATTR(state, 0444, rtrs_clt_state_show, NULL);
+
+static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_reconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_reconnect_attr =
+ __ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
+ rtrs_clt_reconnect_store);
+
+static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_disconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_disconnect_attr =
+ __ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
+ rtrs_clt_disconnect_store);
+
+static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_remove_path_attr =
+ __ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
+ rtrs_clt_remove_path_store);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration,
+ rtrs_clt_stats_migration_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reconnects,
+ rtrs_clt_stats_reconnects_to_str,
+ rtrs_clt_reset_reconnects_stat);
+
+STAT_ATTR(struct rtrs_clt_stats, rdma,
+ rtrs_clt_stats_rdma_to_str,
+ rtrs_clt_reset_rdma_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reset_all,
+ rtrs_clt_reset_all_help,
+ rtrs_clt_reset_all_stats);
+
+static struct attribute *rtrs_clt_stats_attrs[] = {
+ &cpu_migration_attr.attr,
+ &reconnects_attr.attr,
+ &rdma_attr.attr,
+ &reset_all_attr.attr,
+ NULL
+};
+
+static struct attribute_group rtrs_clt_stats_attr_group = {
+ .attrs = rtrs_clt_stats_attrs,
+};
+
+static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%u\n", sess->hca_port);
+}
+
+static struct kobj_attribute rtrs_clt_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
+
+static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess->hca_name);
+}
+
+static struct kobj_attribute rtrs_clt_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+
+static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
+
+static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
+
+static struct attribute *rtrs_clt_sess_attrs[] = {
+ &rtrs_clt_hca_name_attr.attr,
+ &rtrs_clt_hca_port_attr.attr,
+ &rtrs_clt_src_addr_attr.attr,
+ &rtrs_clt_dst_addr_attr.attr,
+ &rtrs_clt_state_attr.attr,
+ &rtrs_clt_reconnect_attr.attr,
+ &rtrs_clt_disconnect_attr.attr,
+ &rtrs_clt_remove_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_sess_attr_group = {
+ .attrs = rtrs_clt_sess_attrs,
+};
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ goto remove_group;
+ }
+
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_clt_stats_attr_group);
+ if (err) {
+ pr_err("failed to create stats sysfs group, err: %d\n", err);
+ goto put_kobj_stats;
+ }
+
+ return 0;
+
+put_kobj_stats:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ return err;
+}
+
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ if (sysfs_self)
+ sysfs_remove_file_self(&sess->kobj, sysfs_self);
+ kobject_del(&sess->kobj);
+}
+
+static struct attribute *rtrs_clt_attrs[] = {
+ &dev_attr_max_reconnect_attempts.attr,
+ &dev_attr_mpath_policy.attr,
+ &dev_attr_add_path.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_attr_group = {
+ .attrs = rtrs_clt_attrs,
+};
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+{
+ return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
+
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt)
+{
+ if (clt->kobj_paths) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ }
+}
+
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt)
+{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
new file mode 100644
index 000000000000..564388a85603
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -0,0 +1,2992 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define RTRS_CONNECT_TIMEOUT_MS 30000
+/*
+ * Wait a bit before trying to reconnect after a failure
+ * in order to give server time to finish clean up which
+ * leads to "false positives" failed reconnect attempts
+ */
+#define RTRS_RECONNECT_BACKOFF 1000
+
+MODULE_DESCRIPTION("RDMA Transport Client");
+MODULE_LICENSE("GPL");
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+
+static struct workqueue_struct *rtrs_wq;
+static struct class *rtrs_clt_dev_class;
+
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess;
+ bool connected = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ rcu_read_unlock();
+
+ return connected;
+}
+
+static struct rtrs_permit *
+__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+{
+ size_t max_depth = clt->queue_depth;
+ struct rtrs_permit *permit;
+ int bit;
+
+ /*
+ * Adapted from null_blk get_tag(). Callers from different cpus may
+ * grab the same bit, since find_first_zero_bit is not atomic.
+ * But then the test_and_set_bit_lock will fail for all the
+ * callers but one, so that they will loop again.
+ * This way an explicit spinlock is not required.
+ */
+ do {
+ bit = find_first_zero_bit(clt->permits_map, max_depth);
+ if (unlikely(bit >= max_depth))
+ return NULL;
+ } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
+
+ permit = get_permit(clt, bit);
+ WARN_ON(permit->mem_id != bit);
+ permit->cpu_id = raw_smp_processor_id();
+ permit->con_type = con_type;
+
+ return permit;
+}
+
+static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+ struct rtrs_permit *permit)
+{
+ clear_bit_unlock(permit->mem_id, clt->permits_map);
+}
+
+/**
+ * rtrs_clt_get_permit() - allocates permit for future RDMA operation
+ * @clt: Current session
+ * @con_type: Type of connection to use with the permit
+ * @can_wait: Wait type
+ *
+ * Description:
+ * Allocates permit for the following RDMA operation. Permit is used
+ * to preallocate all resources and to propagate memory pressure
+ * up earlier.
+ *
+ * Context:
+ * Can sleep if @can_wait is set
+ */
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+ enum rtrs_clt_con_type con_type,
+ int can_wait)
+{
+ struct rtrs_permit *permit;
+ DEFINE_WAIT(wait);
+
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit) || !can_wait)
+ return permit;
+
+ do {
+ prepare_to_wait(&clt->permits_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit))
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&clt->permits_wait, &wait);
+
+ return permit;
+}
+EXPORT_SYMBOL(rtrs_clt_get_permit);
+
+/**
+ * rtrs_clt_put_permit() - puts allocated permit
+ * @clt: Current session
+ * @permit: Permit to be freed
+ *
+ * Context:
+ * Does not matter
+ */
+void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+{
+ if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
+ return;
+
+ __rtrs_put_permit(clt, permit);
+
+ /*
+ * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
+ * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
+ * it must have added itself to &clt->permits_wait before
+ * __rtrs_put_permit() finished.
+ * Hence it is safe to guard wake_up() with a waitqueue_active() test.
+ */
+ if (waitqueue_active(&clt->permits_wait))
+ wake_up(&clt->permits_wait);
+}
+EXPORT_SYMBOL(rtrs_clt_put_permit);
+
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
+{
+ return permit + 1;
+}
+EXPORT_SYMBOL(rtrs_permit_to_pdu);
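+
+/*
+ * Illustrative sketch, not part of the original patch: how a ULP might
+ * drive the permit API exported above.  'my_pdu' and 'my_issue_io' are
+ * hypothetical names invented purely for this example.
+ */
+struct my_pdu {
+	struct completion comp;
+};
+
+static int my_issue_io(struct rtrs_clt *clt)
+{
+	struct rtrs_permit *permit;
+	struct my_pdu *pdu;
+
+	/* May sleep until a permit (i.e. a server-side buffer) is free */
+	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, 1 /* can_wait */);
+	if (!permit)
+		return -ENOMEM;
+
+	/* Per-request private area reserved for the ULP at session creation */
+	pdu = rtrs_permit_to_pdu(permit);
+	init_completion(&pdu->comp);
+
+	/* ... submit the IO via rtrs_clt_request() and complete from conf ... */
+	wait_for_completion(&pdu->comp);
+
+	rtrs_clt_put_permit(clt, permit);
+	return 0;
+}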
+
+/**
+ * rtrs_permit_to_clt_con() - returns the RDMA connection for a given permit
+ * @sess: client session pointer
+ * @permit: permit for the allocation of the RDMA buffer
+ * Note:
+ *     IO connections start from 1.
+ *     Connection 0 is reserved for user messages.
+ */
+static
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit)
+{
+ int id = 0;
+
+ if (likely(permit->con_type == RTRS_IO_CON))
+ id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
+
+ return to_clt_con(sess->s.con[id]);
+}
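+
+/*
+ * Worked example (illustrative only): with s.con_num == 5, i.e. one user
+ * connection plus four IO connections, an RTRS_IO_CON permit taken on
+ * CPU 6 maps to id = (6 % (5 - 1)) + 1 = 3, so the request goes out on
+ * the third IO connection.  Permits of any other con_type stay on
+ * connection 0, which carries the user (non-IO) messages.
+ */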
+
+/**
+ * __rtrs_clt_change_state() - change the session state through the session
+ * state machine.
+ *
+ * @sess: client session to change the state of.
+ * @new_state: state to change to.
+ *
+ * Returns true if successful, false if the requested state cannot be set.
+ *
+ * Locks:
+ * state_wq lock must be held.
+ */
+static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_wq.lock);
+
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_CLT_CONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_RECONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_RECONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTED:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTED:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTING_ERR:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_RECONNECTING:
+ case RTRS_CLT_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSED:
+ switch (old_state) {
+ case RTRS_CLT_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_DEAD:
+ switch (old_state) {
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed) {
+ sess->state = new_state;
+ wake_up_locked(&sess->state_wq);
+ }
+
+ return changed;
+}
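+
+/*
+ * For reference, the transitions accepted by the switch above are:
+ *
+ *   RECONNECTING                        -> CONNECTING
+ *   CONNECTED, CONNECTING_ERR, CLOSED   -> RECONNECTING
+ *   CONNECTING                          -> CONNECTED
+ *   CONNECTING                          -> CONNECTING_ERR
+ *   CONNECTING, CONNECTING_ERR,
+ *   RECONNECTING, CONNECTED             -> CLOSING
+ *   CLOSING                             -> CLOSED
+ *   CLOSED                              -> DEAD
+ *
+ * Any other transition is rejected and the current state is kept.
+ */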
+
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state old_state,
+ enum rtrs_clt_state new_state)
+{
+ bool changed = false;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ if (sess->state == old_state)
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ if (rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_RECONNECTING)) {
+ struct rtrs_clt *clt = sess->clt;
+ unsigned int delay_ms;
+
+ /*
+ * Normal scenario, reconnect if we were successfully connected
+ */
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ } else {
+ /*
+		 * Otherwise the error happened while establishing a new
+		 * connection, so notify the waiter with the error state;
+		 * the waiter is responsible for cleaning up the rest and
+		 * reconnecting if needed.
+ */
+ rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR);
+ }
+}
+
+static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static struct ib_cqe fast_reg_cqe = {
+ .done = rtrs_clt_fast_reg_done
+};
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait);
+
+static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_io_req *req =
+ container_of(wc->wr_cqe, typeof(*req), inv_cqe);
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ req->need_inv = false;
+ if (likely(req->need_inv_comp))
+ complete(&req->inv_comp);
+ else
+ /* Complete request from INV callback */
+ complete_rdma_req(req, req->inv_errno, true, false);
+}
+
+static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ if (WARN_ON(!req->in_use))
+ return;
+ if (WARN_ON(!req->con))
+ return;
+ sess = to_clt_sess(con->c.sess);
+
+ if (req->sg_cnt) {
+ if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
+ /*
+			 * We are here to invalidate read requests
+			 * ourselves. In the normal scenario the server
+			 * sends INV for all read requests, so if we got
+			 * here one of two things happened:
+ *
+ * 1. this is failover, when errno != 0
+ * and can_wait == 1,
+ *
+ * 2. something totally bad happened and
+ * server forgot to send INV, so we
+ * should do that ourselves.
+ */
+
+ if (likely(can_wait)) {
+ req->need_inv_comp = true;
+ } else {
+ /* This should be IO path, so always notify */
+ WARN_ON(!notify);
+ /* Save errno for INV callback */
+ req->inv_errno = errno;
+ }
+
+ err = rtrs_inv_rkey(req);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ req->mr->rkey, err);
+ } else if (likely(can_wait)) {
+ wait_for_completion(&req->inv_comp);
+ } else {
+ /*
+ * Something went wrong, so request will be
+ * completed from INV callback.
+ */
+ WARN_ON_ONCE(1);
+
+ return;
+ }
+ }
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+
+ req->in_use = false;
+ req->con = NULL;
+
+ if (notify)
+ req->conf(req->priv, errno);
+}
+
+static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, u32 off,
+ u32 imm, struct ib_send_wr *wr)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ enum ib_send_flags flags;
+ struct ib_sge sge;
+
+ if (unlikely(!req->sg_size)) {
+ rtrs_wrn(con->c.sess,
+ "Doing RDMA Write failed, no data supplied\n");
+ return -EINVAL;
+ }
+
+ /* user data and user message in the first list element */
+ sge.addr = req->iu->dma_addr;
+ sge.length = req->sg_size;
+ sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+	 * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ req->sg_size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
+ rbuf->rkey, rbuf->addr + off,
+ imm, flags, wr);
+}
+
+static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+ s16 errno, bool w_inval)
+{
+ struct rtrs_clt_io_req *req;
+
+ if (WARN_ON(msg_id >= sess->queue_depth))
+ return;
+
+ req = &sess->reqs[msg_id];
+	/* Drop need_inv if the server responded with send-with-invalidation */
+ req->need_inv &= !w_inval;
+ complete_rdma_req(req, errno, true, false);
+}
+
+static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_iu *iu;
+ int err;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ cqe);
+ err = rtrs_iu_post_recv(&con->c, iu);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_rkey_rsp *msg;
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ struct rtrs_iu *iu;
+ u32 buf_id;
+ int err;
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
+ rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ buf_id = le16_to_cpu(msg->buf_id);
+ if (WARN_ON(buf_id >= sess->queue_depth))
+ goto out;
+
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ if (WARN_ON(buf_id != msg_id))
+ goto out;
+ sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(sess, msg_id, err, w_inval);
+ }
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ return rtrs_clt_recv_done(con, wc);
+out:
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_clt_rdma_done
+};
+
+/*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr_arr[2], *wr;
+ int i;
+
+ memset(wr_arr, 0, sizeof(wr_arr));
+ for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
+ wr = &wr_arr[i];
+ wr->wr_cqe = cqe;
+ if (i)
+ /* Chain backwards */
+ wr->next = &wr_arr[i - 1];
+ }
+
+ return ib_post_recv(con->qp, wr, NULL);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(sess->clt, "RDMA failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ return;
+ }
+ rtrs_clt_update_wc_stats(con);
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ process_io_rsp(sess, msg_id, err, w_inval);
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else {
+ rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ imm_type);
+ }
+ if (w_inval)
+ /*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+ err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
+ else
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ err);
+ rtrs_rdma_error_recovery(con);
+ break;
+ }
+ break;
+ case IB_WC_RECV:
+ /*
+ * Key invalidations from server side
+ */
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ return rtrs_clt_recv_done(con, wc);
+
+ return rtrs_clt_rkey_rsp_done(con, wc);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ break;
+
+ default:
+ rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+{
+ int err, i;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ for (i = 0; i < q_size; i++) {
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ struct rtrs_iu *iu = &con->rsp_ius[i];
+
+ err = rtrs_iu_post_recv(&con->c, iu);
+ } else {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ }
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_clt_sess *sess)
+{
+ size_t q_size = 0;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = sess->queue_depth;
+
+ /*
+ * x2 for RDMA read responses + FR key invalidations,
+ * RDMA writes do not require any FR registrations.
+ */
+ q_size *= 2;
+
+ err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct path_it {
+ int i;
+ struct list_head skip_list;
+ struct rtrs_clt *clt;
+ struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+};
+
+/**
+ * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+ * @head: the head for the list.
+ * @ptr: the list head to take the next element from.
+ * @type: the type of the struct this is embedded in.
+ * @memb: the name of the list_head within the struct.
+ *
+ * The next element is returned in round-robin fashion, i.e. the head will be
+ * skipped, but if the list is observed as empty, NULL will be returned.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
+({ \
+ list_next_or_null_rcu(head, ptr, type, memb) ?: \
+ list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
+ type, memb); \
+})
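+
+/*
+ * Illustrative behaviour (not part of the original patch): for a paths_list
+ * head -> A -> B -> C, successive calls starting from A yield B, then C,
+ * then A again (the list head itself is never returned), and NULL is
+ * returned only when the list is observed as empty.  get_next_path_rr()
+ * below relies on exactly this to advance the per-CPU path pointer.
+ */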
+
+/**
+ * get_next_path_rr() - Returns path in round-robin fashion.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_RR
+ *
+ * Locks:
+ *     rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+{
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_sess *path;
+ struct rtrs_clt *clt;
+
+ clt = it->clt;
+
+ /*
+	 * Here we use two RCU objects: @paths_list and the @pcpu_path
+	 * pointer. See rtrs_clt_remove_path_from_arr() for details on
+	 * how that is handled.
+ */
+
+ ppcpu_path = this_cpu_ptr(clt->pcpu_path);
+ path = rcu_dereference(*ppcpu_path);
+ if (unlikely(!path))
+ path = list_first_or_null_rcu(&clt->paths_list,
+ typeof(*path), s.entry);
+ else
+ path = list_next_or_null_rr_rcu(&clt->paths_list,
+ &path->s.entry,
+ typeof(*path),
+ s.entry);
+ rcu_assign_pointer(*ppcpu_path, path);
+
+ return path;
+}
+
+/**
+ * get_next_path_min_inflight() - Returns path with minimal inflight count.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_MIN_INFLIGHT
+ *
+ * Locks:
+ *     rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+{
+ struct rtrs_clt_sess *min_path = NULL;
+ struct rtrs_clt *clt = it->clt;
+ struct rtrs_clt_sess *sess;
+ int min_inflight = INT_MAX;
+ int inflight;
+
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ continue;
+
+ inflight = atomic_read(&sess->stats->inflight);
+
+ if (inflight < min_inflight) {
+ min_inflight = inflight;
+ min_path = sess;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+{
+ INIT_LIST_HEAD(&it->skip_list);
+ it->clt = clt;
+ it->i = 0;
+
+ if (clt->mp_policy == MP_POLICY_RR)
+ it->next_path = get_next_path_rr;
+ else
+ it->next_path = get_next_path_min_inflight;
+}
+
+static inline void path_it_deinit(struct path_it *it)
+{
+ struct list_head *skip, *tmp;
+ /*
+ * The skip_list is used only for the MIN_INFLIGHT policy.
+ * We need to remove paths from it, so that next IO can insert
+ * paths (->mp_skip_entry) into a skip_list again.
+ */
+ list_for_each_safe(skip, tmp, &it->skip_list)
+ list_del_init(skip);
+}
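+
+/*
+ * Typical use of the path iterator (illustrative only; the in-tree user
+ * is rtrs_clt_failover_req() further below):
+ *
+ *	rcu_read_lock();
+ *	for (path_it_init(&it, clt);
+ *	     (sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ *	     it.i++)
+ *		... try to submit the request on sess ...
+ *	path_it_deinit(&it);
+ *	rcu_read_unlock();
+ */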
+
+/**
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
+ * about an inflight IO.
+ * The user buffer holding the user control message (not data) is copied into
+ * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
+ * also hold the rtrs control message.
+ * @req: an io request holding information about IO.
+ * @sess: client session
+ * @conf: confirmation callback function to notify upper layer.
+ * @permit: permit for allocation of RDMA remote buffer
+ * @priv: private pointer
+ * @vec: kernel vector containing control message
+ * @usr_len: length of the user message
+ * @sg: scatter list for IO data
+ * @sg_cnt: number of scatter list entries
+ * @data_len: length of the IO data
+ * @dir: direction of the IO.
+ */
+static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct iov_iter iter;
+ size_t len;
+
+ req->permit = permit;
+ req->in_use = true;
+ req->usr_len = usr_len;
+ req->data_len = data_len;
+ req->sglist = sg;
+ req->sg_cnt = sg_cnt;
+ req->priv = priv;
+ req->dir = dir;
+ req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->conf = conf;
+ req->need_inv = false;
+ req->need_inv_comp = false;
+ req->inv_errno = 0;
+
+ iov_iter_kvec(&iter, READ, vec, 1, usr_len);
+ len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ WARN_ON(len != usr_len);
+
+ reinit_completion(&req->inv_comp);
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct rtrs_clt_io_req *req;
+
+ req = &sess->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ sg, sg_cnt, data_len, dir);
+ return req;
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_io_req *req;
+ struct kvec vec = {
+ .iov_base = fail_req->iu->buf,
+ .iov_len = fail_req->usr_len
+ };
+
+ req = &alive_sess->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ fail_req->priv, &vec, fail_req->usr_len,
+ fail_req->sglist, fail_req->sg_cnt,
+ fail_req->data_len, fail_req->dir);
+ return req;
+}
+
+static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf,
+ u32 size, u32 imm)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct ib_sge *sge = req->sge;
+ enum ib_send_flags flags;
+ struct scatterlist *sg;
+ size_t num_sge;
+ int i;
+
+ for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ num_sge = 1 + req->sg_cnt;
+
+ /*
+ * From time to time we have to post signalled sends,
+	 * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
+ rbuf->rkey, rbuf->addr, imm,
+ flags, NULL);
+}
+
+static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_write *msg;
+
+ struct rtrs_rbuf *rbuf;
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s, "Write request failed, map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put rtrs msg after sg and user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_WRITE);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ /* rtrs message on server side will be after user data and message */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+ req->sg_size = tsize;
+ rbuf = &sess->rbufs[buf_id];
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
+ req->usr_len + sizeof(*msg),
+ imm);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Write request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (unlikely(nr < req->sg_cnt))
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
+}
+
+static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_read *msg;
+ struct rtrs_ib_dev *dev;
+
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ s = &sess->s;
+ dev = sess->s.dev;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s,
+ "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s,
+ "Read request failed, dma map failed\n");
+ return -EINVAL;
+ }
+ }
+	/* put our message into req->iu->buf after the user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_READ);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Read request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ };
+ wr = &rwr.wr;
+
+ msg->sg_cnt = cpu_to_le16(1);
+ msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
+
+ msg->desc[0].addr = cpu_to_le64(req->mr->iova);
+ msg->desc[0].key = cpu_to_le32(req->mr->rkey);
+ msg->desc[0].len = cpu_to_le32(req->mr->length);
+
+ /* Further invalidation is required */
+ req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+
+ } else {
+ msg->sg_cnt = 0;
+ msg->flags = 0;
+ }
+ /*
+ * rtrs message will be after the space reserved for disk data and
+ * user message
+ */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+
+ req->sg_size = sizeof(*msg);
+ req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
+ req->sg_size += req->usr_len;
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, READ);
+
+ ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ req->data_len, imm, wr);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Read request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ req->need_inv = false;
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+/**
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
+ * @clt: clt context
+ * @fail_req: a failed io request.
+ */
+static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_io_req *req;
+ int err = -ECONNABORTED;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ it.i++) {
+ if (unlikely(READ_ONCE(alive_sess->state) !=
+ RTRS_CLT_CONNECTED))
+ continue;
+ req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ if (req->dir == DMA_TO_DEVICE)
+ err = rtrs_clt_write_req(req);
+ else
+ err = rtrs_clt_read_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_io_req *req;
+ int i, err;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (!req->in_use)
+ continue;
+
+ /*
+		 * Safely (without notification) complete the failed request.
+		 * After completion this request is still usable and can
+		 * be failed over to another path.
+ */
+ complete_rdma_req(req, -ECONNABORTED, false, true);
+
+ err = rtrs_clt_failover_req(clt, req);
+ if (unlikely(err))
+ /* Failover failed, notify anyway */
+ req->conf(req->priv, err);
+ }
+}
+
+static void free_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ int i;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+ kfree(req->sge);
+ rtrs_iu_free(req->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ }
+ kfree(sess->reqs);
+ sess->reqs = NULL;
+}
+
+static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt *clt = sess->clt;
+ int i, err = -ENOMEM;
+
+ sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
+ GFP_KERNEL);
+ if (!sess->reqs)
+ return -ENOMEM;
+
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
+ sess->s.dev->ib_dev,
+ DMA_TO_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!req->iu)
+ goto out;
+
+ req->sge = kmalloc_array(clt->max_segments + 1,
+ sizeof(*req->sge), GFP_KERNEL);
+ if (!req->sge)
+ goto out;
+
+ req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sess->max_pages_per_mr);
+ if (IS_ERR(req->mr)) {
+ err = PTR_ERR(req->mr);
+ req->mr = NULL;
+ pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
+ sess->max_pages_per_mr);
+ goto out;
+ }
+
+ init_completion(&req->inv_comp);
+ }
+
+ return 0;
+
+out:
+ free_sess_reqs(sess);
+
+ return err;
+}
+
+static int alloc_permits(struct rtrs_clt *clt)
+{
+ unsigned int chunk_bits;
+ int err, i;
+
+ clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
+ sizeof(long), GFP_KERNEL);
+ if (!clt->permits_map) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
+ if (!clt->permits) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+ chunk_bits = ilog2(clt->queue_depth - 1) + 1;
+ for (i = 0; i < clt->queue_depth; i++) {
+ struct rtrs_permit *permit;
+
+ permit = get_permit(clt, i);
+ permit->mem_id = i;
+ permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
+ }
+
+ return 0;
+
+err_map:
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+out_err:
+ return err;
+}
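+
+/*
+ * Worked example (illustrative only, the numbers are not from this patch):
+ * with a queue depth of 128, chunk_bits = ilog2(127) + 1 = 7, so permit i
+ * gets mem_off = i << (MAX_IMM_PAYL_BITS - 7).  The IO paths later build
+ * the immediate data as mem_off + data_len + usr_len, which lets the
+ * server recover both the chunk id (upper chunk_bits) and the byte offset
+ * of the rtrs message inside that chunk (lower bits) from the immediate
+ * payload alone.
+ */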
+
+static void free_permits(struct rtrs_clt *clt)
+{
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+ kfree(clt->permits);
+ clt->permits = NULL;
+}
+
+static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+{
+ struct ib_device *ib_dev;
+ u64 max_pages_per_mr;
+ int mr_page_shift;
+
+ ib_dev = sess->s.dev->ib_dev;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
+ max_pages_per_mr = ib_dev->attrs.max_mr_size;
+ do_div(max_pages_per_mr, (1ull << mr_page_shift));
+ sess->max_pages_per_mr =
+ min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ ib_dev->attrs.max_fast_reg_page_list_len);
+ sess->max_send_sge = ib_dev->attrs.max_send_sge;
+}
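+
+/*
+ * Worked example (illustrative only): if the HCA reports 4K as its smallest
+ * supported page size, mr_page_shift is 12; with a max_mr_size of 4 GiB
+ * that yields 2^20 pages, which is then clamped by both the value derived
+ * from max_segments at session allocation and the device's
+ * max_fast_reg_page_list_len.
+ */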
+
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state,
+ enum rtrs_clt_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ *old_state = sess->state;
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+
+ return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
+{
+ struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_clt_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work);
+static void rtrs_clt_close_work(struct work_struct *work);
+
+static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+ const struct rtrs_addr *path,
+ size_t con_num, u16 max_segments,
+ size_t max_segment_size)
+{
+ struct rtrs_clt_sess *sess;
+ int err = -ENOMEM;
+ int cpu;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ /* Extra connection for user messages */
+ con_num += 1;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_sess;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_con;
+
+ mutex_init(&sess->init_mutex);
+ uuid_gen(&sess->s.uuid);
+ memcpy(&sess->s.dst_addr, path->dst,
+ rdma_addr_size((struct sockaddr *)path->dst));
+
+ /*
+	 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
+	 * checks that the sa_family is non-zero. If the user passed
+	 * src_addr=NULL, sess->src_addr will contain only zeros, which is fine.
+ */
+ if (path->src)
+ memcpy(&sess->s.src_addr, path->src,
+ rdma_addr_size((struct sockaddr *)path->src));
+ strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
+ sess->s.con_num = con_num;
+ sess->clt = clt;
+ sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
+ init_waitqueue_head(&sess->state_wq);
+ sess->state = RTRS_CLT_CONNECTING;
+ atomic_set(&sess->connected_cnt, 0);
+ INIT_WORK(&sess->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(sess);
+
+ sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
+ if (!sess->mp_skip_entry)
+ goto err_free_stats;
+
+ for_each_possible_cpu(cpu)
+ INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+
+ err = rtrs_clt_init_stats(sess->stats);
+ if (err)
+ goto err_free_percpu;
+
+ return sess;
+
+err_free_percpu:
+ free_percpu(sess->mp_skip_entry);
+err_free_stats:
+ kfree(sess->stats);
+err_free_con:
+ kfree(sess->s.con);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+void free_sess(struct rtrs_clt_sess *sess)
+{
+ free_percpu(sess->mp_skip_entry);
+ mutex_destroy(&sess->init_mutex);
+ kfree(sess->s.con);
+ kfree(sess->rbufs);
+ kfree(sess);
+}
+
+static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+{
+ struct rtrs_clt_con *con;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ /* Map first two connections to the first CPU */
+ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
+ con->c.cid = cid;
+ con->c.sess = &sess->s;
+ atomic_set(&con->io_cnt, 0);
+
+ sess->s.con[cid] = &con->c;
+
+ return 0;
+}
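+
+/*
+ * Example of the CPU mapping above (illustrative only): on a 4-CPU machine,
+ * cid 0 (the user connection) and cid 1 both land on CPU 0, cid 2 on CPU 1,
+ * cid 3 on CPU 2, cid 4 on CPU 3, and cid 5 wraps back to CPU 0, matching
+ * the "map first two connections to the first CPU" note in create_con().
+ */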
+
+static void destroy_con(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ sess->s.con[con->c.cid] = NULL;
+ kfree(con);
+}
+
+static int create_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u16 wr_queue_size;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+ /*
+	 * This function can fail, but destroy_con_cq_qp() should still
+	 * be called, because create_con_cq_qp() is called on the cm
+	 * event path, so the caller/waiter never knows whether we failed
+	 * before or after create_con_cq_qp(). To solve this dilemma without
+	 * introducing additional flags, simply allow destroy_con_cq_qp() to
+	 * be called many times.
+ */
+
+ if (con->c.cid == 0) {
+ /*
+ * One completion for each receive and two for each send
+ * (send request + registration)
+ * + 2 for drain and heartbeat
+ * in case qp gets into error state
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ /* We must be the first here */
+ if (WARN_ON(sess->s.dev))
+ return -EINVAL;
+
+ /*
+ * The whole session uses device from user connection.
+ * Be careful not to close user connection before ib dev
+ * is gracefully put.
+ */
+ sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ &dev_pd);
+ if (!sess->s.dev) {
+ rtrs_wrn(sess->clt,
+				 "rtrs_ib_dev_find_or_add(): no memory\n");
+ return -ENOMEM;
+ }
+ sess->s.dev_ref = 1;
+ query_fast_reg_mode(sess);
+ } else {
+ /*
+ * Here we assume that session members are correctly set.
+ * This is always true if user connection (cid == 0) is
+ * established first.
+ */
+ if (WARN_ON(!sess->s.dev))
+ return -EINVAL;
+ if (WARN_ON(!sess->queue_depth))
+ return -EINVAL;
+
+ /* Shared between connections */
+ sess->s.dev_ref++;
+ wr_queue_size =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ sess->queue_depth * 3 + 1);
+ }
+ /* alloc iu to recv new rkey reply when server reports flags set */
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+ con->queue_size = wr_queue_size;
+ }
+ cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
+ err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
+ cq_vector, wr_queue_size, wr_queue_size,
+ IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+ */
+ return err;
+}
+
+static void destroy_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ /*
+	 * Be careful here: destroy_con_cq_qp() can be called even if
+	 * create_con_cq_qp() failed, see comments there.
+ */
+
+ rtrs_cq_qp_destroy(&con->c);
+ if (con->rsp_ius) {
+ rtrs_iu_free(con->rsp_ius, DMA_FROM_DEVICE,
+ sess->s.dev->ib_dev, con->queue_size);
+ con->rsp_ius = NULL;
+ con->queue_size = 0;
+ }
+ if (sess->s.dev_ref && !--sess->s.dev_ref) {
+ rtrs_ib_dev_put(sess->s.dev);
+ sess->s.dev = NULL;
+ }
+}
+
+static void stop_cm(struct rtrs_clt_con *con)
+{
+ rdma_disconnect(con->c.cm_id);
+ if (con->c.qp)
+ ib_drain_qp(con->c.qp);
+}
+
+static void destroy_cm(struct rtrs_clt_con *con)
+{
+ rdma_destroy_id(con->c.cm_id);
+ con->c.cm_id = NULL;
+}
+
+static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ int err;
+
+ err = create_con_cq_qp(con);
+ if (err) {
+ rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Resolving route failed, err: %d\n", err);
+ destroy_con_cq_qp(con);
+ }
+
+ return err;
+}
+
+static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_msg_conn_req msg;
+ struct rdma_conn_param param;
+
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_req) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .cid = cpu_to_le16(con->c.cid),
+ .cid_num = cpu_to_le16(sess->s.con_num),
+ .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ };
+ uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+ err = rdma_connect(con->c.cm_id, &param);
+ if (err)
+ rtrs_err(clt, "rdma_connect(): %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ const struct rtrs_msg_conn_rsp *msg;
+ u16 version, queue_depth;
+ int errno;
+ u8 len;
+
+ msg = ev->param.conn.private_data;
+ len = ev->param.conn.private_data_len;
+ if (len < sizeof(*msg)) {
+ rtrs_err(clt, "Invalid RTRS connection response\n");
+ return -ECONNRESET;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ rtrs_err(clt, "Invalid RTRS magic\n");
+ return -ECONNRESET;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ return -ECONNRESET;
+ }
+ errno = le16_to_cpu(msg->errno);
+ if (errno) {
+ rtrs_err(clt, "Invalid RTRS message: errno %d\n",
+ errno);
+ return -ECONNRESET;
+ }
+ if (con->c.cid == 0) {
+ queue_depth = le16_to_cpu(msg->queue_depth);
+
+ if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
+ queue_depth);
+ return -ECONNRESET;
+ }
+ if (!sess->rbufs || sess->queue_depth < queue_depth) {
+ kfree(sess->rbufs);
+ sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
+ GFP_KERNEL);
+ if (!sess->rbufs)
+ return -ENOMEM;
+ }
+ sess->queue_depth = queue_depth;
+ sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ sess->max_io_size = le32_to_cpu(msg->max_io_size);
+ sess->flags = le32_to_cpu(msg->flags);
+ sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+
+ /*
+			 * The global queue depth and IO size are always a minimum.
+			 * If during a reconnection the server sends us a slightly
+			 * higher value, the client does not care and keeps using
+			 * the cached minimum.
+			 *
+			 * Since we can have several sessions (paths) re-establishing
+			 * connections in parallel, take the lock.
+ */
+ mutex_lock(&clt->paths_mutex);
+ clt->queue_depth = min_not_zero(sess->queue_depth,
+ clt->queue_depth);
+ clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->max_io_size);
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+ * Cache the hca_port and hca_name for sysfs
+ */
+ sess->hca_port = con->c.cm_id->port_num;
+ scnprintf(sess->hca_name, sizeof(sess->hca_name),
+			  "%s", sess->s.dev->ib_dev->name);
+ sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ }
+
+ return 0;
+}
+
+static inline void flag_success_on_conn(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ atomic_inc(&sess->connected_cnt);
+ con->cm_err = 1;
+}
+
+static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_sess *s = con->c.sess;
+ const struct rtrs_msg_conn_rsp *msg;
+ const char *rej_msg;
+ int status, errno;
+ u8 data_len;
+
+ status = ev->status;
+ rej_msg = rdma_reject_msg(con->c.cm_id, status);
+ msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
+
+ if (msg && data_len >= sizeof(*msg)) {
+ errno = (int16_t)le16_to_cpu(msg->errno);
+ if (errno == -EBUSY)
+ rtrs_err(s,
+				"Previous session still exists on the server, please reconnect later\n");
+ else
+ rtrs_err(s,
+ "Connect rejected: status %d (%s), rtrs errno %d\n",
+ status, rej_msg, errno);
+ } else {
+ rtrs_err(s,
+ "Connect rejected but with malformed message: status %d (%s)\n",
+ status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+{
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
+ queue_work(rtrs_wq, &sess->close_work);
+ if (wait)
+ flush_work(&sess->close_work);
+}
+
+static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
+{
+ if (con->cm_err == 1) {
+ struct rtrs_clt_sess *sess;
+
+ sess = to_clt_sess(con->c.sess);
+		if (atomic_dec_and_test(&sess->connected_cnt))
+			wake_up(&sess->state_wq);
+ }
+ con->cm_err = cm_err;
+}
+
+static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_con *con = cm_id->context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ int cm_err = 0;
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_err = rtrs_rdma_addr_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_err = rtrs_rdma_route_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ con->cm_err = rtrs_rdma_conn_established(con, ev);
+ if (likely(!con->cm_err)) {
+ /*
+ * Report success and wake up. Here we abuse state_wq,
+ * i.e. wake up without state change, but we set cm_err.
+ */
+ flag_success_on_conn(con);
+ wake_up(&sess->state_wq);
+ return 0;
+ }
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_err = rtrs_rdma_conn_rejected(con, ev);
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_wrn(s, "CM error event %d\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ cm_err = -EHOSTUNREACH;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+ * Device removal is a special case. Queue close and return 0.
+ */
+ rtrs_clt_close_conns(sess, false);
+ return 0;
+ default:
+ rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ }
+
+ if (cm_err) {
+ /*
+ * cm error makes sense only on connection establishing,
+ * in other cases we rely on normal procedure of reconnecting.
+ */
+ flag_error_on_conn(con, cm_err);
+ rtrs_rdma_error_recovery(con);
+ }
+
+ return 0;
+}
+
+static int create_cm(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rdma_cm_id *cm_id;
+ int err;
+
+ cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
+ sess->s.dst_addr.ss_family == AF_IB ?
+ RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ err = PTR_ERR(cm_id);
+ rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
+
+ return err;
+ }
+ con->c.cm_id = cm_id;
+ con->cm_err = 0;
+ /* allow the port to be reused */
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+ goto destroy_cm;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&sess->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+ goto destroy_cm;
+ }
+ /*
+	 * Combine connection status and session events. This is needed
+	 * to wait for two possible cases: cm_err has something meaningful
+	 * or the session state was really changed to an error by device removal.
+ */
+ err = wait_event_interruptible_timeout(
+ sess->state_wq,
+ con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+ if (err == 0 || err == -ERESTARTSYS) {
+ if (err == 0)
+ err = -ETIMEDOUT;
+ /* Timedout or interrupted */
+ goto errr;
+ }
+ if (con->cm_err < 0) {
+ err = con->cm_err;
+ goto errr;
+ }
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ /* Device removal */
+ err = -ECONNABORTED;
+ goto errr;
+ }
+
+ return 0;
+
+errr:
+ stop_cm(con);
+	/* It is safe to call destroy even if cq_qp was not initialized */
+ destroy_con_cq_qp(con);
+destroy_cm:
+ destroy_cm(con);
+
+ return err;
+}
+
+static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ int up;
+
+ /*
+ * We can fire RECONNECTED event only when all paths were
+ * connected on rtrs_clt_open(), then each was disconnected
+	 * and the first one has connected again. That's why this nasty
+	 * game with the counter value.
+ */
+
+ mutex_lock(&clt->paths_ev_mutex);
+ up = ++clt->paths_up;
+ /*
+	 * Here it is safe to access paths_num directly since the up counter
+	 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
+	 * in progress, thus path removals are impossible.
+ */
+ if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
+ clt->paths_up = clt->paths_num;
+ else if (up == 1)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+ sess->reconnect_attempts = 0;
+ sess->stats->reconnects.successful_cnt++;
+}
+
+static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&clt->paths_ev_mutex);
+ WARN_ON(!clt->paths_up);
+ if (--clt->paths_up == 0)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *con;
+ unsigned int cid;
+
+ WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+
+ /*
+ * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
+ * exactly in between. Start destroying after it finishes.
+ */
+ mutex_lock(&sess->init_mutex);
+ mutex_unlock(&sess->init_mutex);
+
+ /*
+ * All IO paths must observe !CONNECTED state before we
+ * free everything.
+ */
+ synchronize_rcu();
+
+ rtrs_clt_stop_hb(sess);
+
+ /*
+	 * The order is utterly crucial: first disconnect and complete all
+	 * rdma requests with an error (thus setting in_use=false for the
+	 * requests), then fail outstanding requests checking in_use for
+	 * each, and finally notify the upper layer about the session
+	 * disconnection.
+ */
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ stop_cm(con);
+ }
+ fail_all_outstanding_reqs(sess);
+ free_sess_reqs(sess);
+ rtrs_clt_sess_down(sess);
+
+ /*
+ * Wait for graceful shutdown, namely when peer side invokes
+ * rdma_disconnect(). 'connected_cnt' is decremented only on
+	 * CM events, thus if the other side has crashed and the heartbeat
+	 * has detected that something is wrong, we will be stuck here for
+	 * exactly the timeout in ms, since CM does not fire anything.
+	 * That is fine, we are not in a hurry.
+ */
+ wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+}
+
+static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_sess *sess,
+ struct rtrs_clt_sess *next)
+{
+ struct rtrs_clt_sess **ppcpu_path;
+
+ /* Call cmpxchg() without sparse warnings */
+ ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
+ return sess == cmpxchg(ppcpu_path, sess, next);
+}
+
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *next;
+ bool wait_for_grace = false;
+ int cpu;
+
+ mutex_lock(&clt->paths_mutex);
+ list_del_rcu(&sess->s.entry);
+
+ /* Make sure everybody observes path removal. */
+ synchronize_rcu();
+
+ /*
+ * At this point nobody sees @sess in the list, but still we have
+ * dangling pointer @pcpu_path which _can_ point to @sess. Since
+ * nobody can observe @sess in the list, we guarantee that IO path
+ * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
+ * to @sess, but can never again become @sess.
+ */
+
+ /*
+	 * Decrement the number of paths only after the grace period,
+	 * because a caller of do_each_path() must first observe the list
+	 * without the path and only then the decremented number of paths.
+ *
+ * Otherwise there can be the following situation:
+ * o Two paths exist and IO is coming.
+ * o One path is removed:
+ * CPU#0 CPU#1
+ * do_each_path(): rtrs_clt_remove_path_from_arr():
+ * path = get_next_path()
+ * ^^^ list_del_rcu(path)
+ * [!CONNECTED path] clt->paths_num--
+ * ^^^^^^^^^
+ * load clt->paths_num from 2 to 1
+ * ^^^^^^^^^
+ * sees 1
+ *
+ * path is observed as !CONNECTED, but do_each_path() loop
+ * ends, because expression i < clt->paths_num is false.
+ */
+ clt->paths_num--;
+
+ /*
+ * Get @next connection from current @sess which is going to be
+ * removed. If @sess is the last element, then @next is NULL.
+ */
+ rcu_read_lock();
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ typeof(*next), s.entry);
+ rcu_read_unlock();
+
+ /*
+ * @pcpu paths can still point to the path which is going to be
+ * removed, so change the pointer manually.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+
+ ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
+ if (rcu_dereference_protected(*ppcpu_path,
+ lockdep_is_held(&clt->paths_mutex)) != sess)
+ /*
+			 * synchronize_rcu() was called just after deleting
+			 * the entry from the list, thus the IO code path
+			 * cannot change the pointer back to the session
+			 * which is being removed, so we are safe here.
+ */
+ continue;
+
+ /*
+ * We race with IO code path, which also changes pointer,
+ * thus we have to be careful not to overwrite it.
+ */
+ if (xchg_sessions(ppcpu_path, sess, next))
+ /*
+ * @ppcpu_path was successfully replaced with @next,
+			 * which means that someone else could also have
+			 * picked up @sess and be dereferencing it right now,
+			 * so waiting for a grace period is required.
+ */
+ wait_for_grace = true;
+ }
+ if (wait_for_grace)
+ synchronize_rcu();
+
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ mutex_lock(&clt->paths_mutex);
+ clt->paths_num++;
+
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_close_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(work, struct rtrs_clt_sess, close_work);
+
+ cancel_delayed_work_sync(&sess->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
+}
+
+static int init_conns(struct rtrs_clt_sess *sess)
+{
+ unsigned int cid;
+ int err;
+
+ /*
+	 * On every new set of session connections increase the reconnect
+	 * counter in order to avoid clashes with previous sessions that
+	 * have not yet been closed on the server side.
+ */
+ sess->s.recon_cnt++;
+
+ /* Establish all RDMA connections */
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ err = create_con(sess, cid);
+ if (err)
+ goto destroy;
+
+ err = create_cm(to_clt_con(sess->s.con[cid]));
+ if (err) {
+ destroy_con(to_clt_con(sess->s.con[cid]));
+ goto destroy;
+ }
+ }
+ err = alloc_sess_reqs(sess);
+ if (err)
+ goto destroy;
+
+ rtrs_clt_start_hb(sess);
+
+ return 0;
+
+destroy:
+ while (cid--) {
+ struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+
+ stop_cm(con);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+ /*
+	 * If we've never taken the async path and got an error, say,
+	 * doing rdma_resolve_addr(), switch to CONNECTING_ERR state
+	 * manually to keep reconnecting.
+ */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ return;
+ }
+
+ rtrs_clt_update_wc_stats(con);
+}
+
+static int process_info_rsp(struct rtrs_clt_sess *sess,
+ const struct rtrs_msg_info_rsp *msg)
+{
+ unsigned int sg_cnt, total_len;
+ int i, sgi;
+
+ sg_cnt = le16_to_cpu(msg->sg_cnt);
+ if (unlikely(!sg_cnt))
+ return -EINVAL;
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and
+ * the offset inside the memory chunk.
+ */
+ if (unlikely((ilog2(sg_cnt - 1) + 1) +
+ (ilog2(sess->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS)) {
+ rtrs_err(sess->clt,
+ "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
+ MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ return -EINVAL;
+ }
+ if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+		rtrs_err(sess->clt, "Incorrect sg_cnt %d, queue_depth not a multiple of it\n",
+ sg_cnt);
+ return -EINVAL;
+ }
+ total_len = 0;
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ const struct rtrs_sg_desc *desc = &msg->desc[sgi];
+ u32 len, rkey;
+ u64 addr;
+
+ addr = le64_to_cpu(desc->addr);
+ rkey = le32_to_cpu(desc->key);
+ len = le32_to_cpu(desc->len);
+
+ total_len += len;
+
+ if (unlikely(!len || (len % sess->chunk_size))) {
+ rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ len);
+ return -EINVAL;
+ }
+ for ( ; len && i < sess->queue_depth; i++) {
+ sess->rbufs[i].addr = addr;
+ sess->rbufs[i].rkey = rkey;
+
+ len -= sess->chunk_size;
+ addr += sess->chunk_size;
+ }
+ }
+ /* Sanity check */
+ if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ return -EINVAL;
+ }
+ if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
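
The ilog2() check above enforces that the chunk index and the offset within a chunk together fit into the 28-bit immediate payload (MAX_IMM_PAYL_BITS, defined in rtrs-pri.h further down in this patch). A minimal standalone sketch of that bit budget, with assumed example numbers, purely for illustration and not part of the patch:

    #include <linux/log2.h>

    /* Illustrative only: mirrors the check done in process_info_rsp() */
    static bool imm_budget_fits(unsigned int sg_cnt, unsigned int chunk_size)
    {
    	unsigned int id_bits  = ilog2(sg_cnt - 1) + 1;     /* e.g. 128 buffers -> 7 bits  */
    	unsigned int off_bits = ilog2(chunk_size - 1) + 1; /* e.g. 128K chunk  -> 17 bits */

    	return id_bits + off_bits <= 28;	/* MAX_IMM_PAYL_BITS */
    }

With 128 buffers and 128K chunks the payload needs 24 bits and fits; a larger queue depth or chunk size can push it over the limit, which is exactly what the error path above rejects.
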
+
+static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_info_rsp *msg;
+ enum rtrs_clt_state state;
+ struct rtrs_iu *iu;
+ size_t rx_sz;
+ int err;
+
+ state = RTRS_CLT_CONNECTING_ERR;
+
+ WARN_ON(con->c.cid);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto out;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ rx_sz = sizeof(*msg);
+ rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
+ if (unlikely(wc->byte_len < rx_sz)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ err = process_info_rsp(sess, msg);
+ if (unlikely(err))
+ goto out;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err))
+ goto out;
+
+ state = RTRS_CLT_CONNECTED;
+
+out:
+ rtrs_clt_update_wc_stats(con);
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_clt_change_state(sess, state);
+}
+
+static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *tx_iu, *rx_iu;
+ size_t rx_sz;
+ int err;
+
+ rx_sz = sizeof(struct rtrs_msg_info_rsp);
+ rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
+
+ tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
+ sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ rtrs_clt_info_req_done);
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
+ if (unlikely(!tx_iu || !rx_iu)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ goto out;
+ }
+ rx_iu = NULL;
+
+ msg = tx_iu->buf;
+ msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
+ memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info request */
+ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ goto out;
+ }
+ tx_iu = NULL;
+
+ /* Wait for state change */
+ wait_event_interruptible_timeout(sess->state_wq,
+ sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(
+ RTRS_CONNECT_TIMEOUT_MS));
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
+ if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ err = -ECONNRESET;
+ else
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+out:
+ if (tx_iu)
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ if (rx_iu)
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ if (unlikely(err))
+ /* If we've never taken async path because of malloc problems */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+/**
+ * init_sess() - establishes all session connections and does handshake
+ * @sess: client session.
+ * In case of error the full close or reconnect procedure should be taken,
+ * because the reconnect or close async work can already have been started.
+ */
+static int init_sess(struct rtrs_clt_sess *sess)
+{
+ int err;
+
+ mutex_lock(&sess->init_mutex);
+ err = init_conns(sess);
+ if (err) {
+ rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+ goto out;
+ }
+ err = rtrs_send_sess_info(sess);
+ if (err) {
+ rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+ goto out;
+ }
+ rtrs_clt_sess_up(sess);
+out:
+ mutex_unlock(&sess->init_mutex);
+
+ return err;
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+ struct rtrs_clt *clt;
+ unsigned int delay_ms;
+ int err;
+
+ sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
+ reconnect_dwork);
+ clt = sess->clt;
+
+ if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ return;
+
+ if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a session completely if max attempts is reached */
+ rtrs_clt_close_conns(sess, false);
+ return;
+ }
+ sess->reconnect_attempts++;
+
+ /* Stop everything */
+ rtrs_clt_stop_and_destroy_conns(sess);
+ msleep(RTRS_RECONNECT_BACKOFF);
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
+ err = init_sess(sess);
+ if (err)
+ goto reconnect_again;
+ }
+
+ return;
+
+reconnect_again:
+ if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
+ sess->stats->reconnects.fail_cnt++;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ }
+}
+
+static void rtrs_clt_dev_release(struct device *dev)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ kfree(clt);
+}
+
+static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ u16 port, size_t pdu_sz, void *priv,
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev),
+ unsigned int max_segments,
+ size_t max_segment_size,
+ unsigned int reconnect_delay_sec,
+ unsigned int max_reconnect_attempts)
+{
+ struct rtrs_clt *clt;
+ int err;
+
+ if (!paths_num || paths_num > MAX_PATHS_NUM)
+ return ERR_PTR(-EINVAL);
+
+ if (strlen(sessname) >= sizeof(clt->sessname))
+ return ERR_PTR(-EINVAL);
+
+ clt = kzalloc(sizeof(*clt), GFP_KERNEL);
+ if (!clt)
+ return ERR_PTR(-ENOMEM);
+
+ clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
+ if (!clt->pcpu_path) {
+ kfree(clt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ uuid_gen(&clt->paths_uuid);
+ INIT_LIST_HEAD_RCU(&clt->paths_list);
+ clt->paths_num = paths_num;
+ clt->paths_up = MAX_PATHS_NUM;
+ clt->port = port;
+ clt->pdu_sz = pdu_sz;
+ clt->max_segments = max_segments;
+ clt->max_segment_size = max_segment_size;
+ clt->reconnect_delay_sec = reconnect_delay_sec;
+ clt->max_reconnect_attempts = max_reconnect_attempts;
+ clt->priv = priv;
+ clt->link_ev = link_ev;
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
+ init_waitqueue_head(&clt->permits_wait);
+ mutex_init(&clt->paths_ev_mutex);
+ mutex_init(&clt->paths_mutex);
+
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ err = dev_set_name(&clt->dev, "%s", sessname);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kfree(clt);
+ return ERR_PTR(err);
+ }
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_register(&clt->dev);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+ return ERR_PTR(err);
+ }
+
+ clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ if (!clt->kobj_paths) {
+ free_percpu(clt->pcpu_path);
+ device_unregister(&clt->dev);
+		return ERR_PTR(-ENOMEM);
+ }
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ device_unregister(&clt->dev);
+ return ERR_PTR(err);
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
+}
+
+static void wait_for_inflight_permits(struct rtrs_clt *clt)
+{
+ if (clt->permits_map) {
+ size_t sz = clt->queue_depth;
+
+ wait_event(clt->permits_wait,
+ find_first_bit(clt->permits_map, sz) >= sz);
+ }
+}
+
+static void free_clt(struct rtrs_clt *clt)
+{
+ wait_for_inflight_permits(clt);
+ free_permits(clt);
+ free_percpu(clt->pcpu_path);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
+ /* release callback will free clt in last put */
+ device_unregister(&clt->dev);
+}
+
+/**
+ * rtrs_clt_open() - Open a session to an RTRS server
+ * @ops: holds the link event callback and the private pointer.
+ * @sessname: name of the session
+ * @paths: Paths to be established defined by their src and dst addresses
+ * @paths_num: Number of elements in the @paths array
+ * @port: port to be used by the RTRS session
+ * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
+ * @reconnect_delay_sec: time between reconnect tries
+ * @max_segments: Max. number of segments per IO request
+ * @max_segment_size: Max. size of one segment
+ * @max_reconnect_attempts: Number of times to reconnect on error before giving
+ *			    up, 0 for disabled, -1 for forever
+ *
+ * Starts session establishment with the rtrs_server. The function can block
+ * up to ~2000ms before it returns.
+ *
+ * Return a valid pointer on success, otherwise an ERR_PTR.
+ */
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t paths_num, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt *clt;
+ int err, i;
+
+ clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ ops->link_ev,
+ max_segments, max_segment_size, reconnect_delay_sec,
+ max_reconnect_attempts);
+ if (IS_ERR(clt)) {
+ err = PTR_ERR(clt);
+ goto out;
+ }
+ for (i = 0; i < paths_num; i++) {
+ struct rtrs_clt_sess *sess;
+
+ sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ max_segments, max_segment_size);
+ if (IS_ERR(sess)) {
+ err = PTR_ERR(sess);
+ goto close_all_sess;
+ }
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+
+ err = init_sess(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+ }
+ err = alloc_permits(clt);
+ if (err)
+ goto close_all_sess;
+
+ return clt;
+
+close_all_sess:
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+ free_clt(clt);
+
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(rtrs_clt_open);
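
For orientation, a minimal usage sketch of the exported API above (not part of the patch; the field names of struct rtrs_addr and struct rtrs_clt_ops are assumptions based on how they are used in this file and in the public rtrs.h header, and the port, segment sizes and session name are placeholders):

    #include "rtrs.h"	/* public RTRS client API */

    static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
    {
    	/* RTRS_CLT_LINK_EV_RECONNECTED / _DISCONNECTED, see rtrs_clt_sess_up/down() */
    	pr_info("rtrs link event %d\n", ev);
    }

    static struct rtrs_clt *my_open(struct sockaddr_storage *src,
    				struct sockaddr_storage *dst)
    {
    	struct rtrs_addr path = { .src = src, .dst = dst };	/* assumed layout */
    	struct rtrs_clt_ops ops = {
    		.priv	 = NULL,
    		.link_ev = my_link_ev,
    	};

    	return rtrs_clt_open(&ops, "my_session", &path, 1 /* paths_num */,
    			     1234 /* port, placeholder */, 0 /* pdu_sz */,
    			     30 /* reconnect_delay_sec */, 128 /* max_segments */,
    			     4096 /* max_segment_size */, -1 /* reconnect forever */);
    }
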
+
+/**
+ * rtrs_clt_close() - Close a session
+ * @clt: Session handle. Session is freed upon return.
+ */
+void rtrs_clt_close(struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+
+ /* Firstly forbid sysfs access */
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+
+ /* Now it is safe to iterate over all paths without locks */
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ free_clt(clt);
+}
+EXPORT_SYMBOL(rtrs_clt_close);
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ enum rtrs_clt_state old_state;
+ int err = -EBUSY;
+ bool changed;
+
+ changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ &old_state);
+ if (changed) {
+ sess->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ }
+ if (changed || old_state == RTRS_CLT_RECONNECTING) {
+ /*
+ * flush_delayed_work() queues pending work for immediate
+ * execution, so do the flush if we have queued something
+ * right now or work is pending.
+ */
+ flush_delayed_work(&sess->reconnect_dwork);
+ err = (READ_ONCE(sess->state) ==
+ RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
+ }
+
+ return err;
+}
+
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ rtrs_clt_close_conns(sess, true);
+
+ return 0;
+}
+
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ enum rtrs_clt_state old_state;
+ bool changed;
+
+ /*
+	 * Keep stopping the path until its state is changed to DEAD or
+	 * is observed as DEAD:
+	 * 1. State was changed to DEAD - we were fast and nobody
+	 *    invoked rtrs_clt_reconnect(), which can again start
+	 *    reconnecting.
+	 * 2. State was observed as DEAD - someone else is removing the
+	 *    path in parallel.
+ */
+ do {
+ rtrs_clt_close_conns(sess, true);
+ changed = rtrs_clt_change_state_get_old(sess,
+ RTRS_CLT_DEAD,
+ &old_state);
+ } while (!changed && old_state != RTRS_CLT_DEAD);
+
+ if (likely(changed)) {
+ rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ rtrs_clt_remove_path_from_arr(sess);
+ kobject_put(&sess->kobj);
+ }
+
+ return 0;
+}
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+{
+ clt->max_reconnect_attempts = (unsigned int)value;
+}
+
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+{
+ return (int)clt->max_reconnect_attempts;
+}
+
+/**
+ * rtrs_clt_request() - Request data transfer to/from server via RDMA.
+ *
+ * @dir: READ/WRITE
+ * @ops:	confirmation callback to be invoked on completion, and its private pointer.
+ * @clt: Session
+ * @permit: Preallocated permit
+ * @vec: Message that is sent to server together with the request.
+ * Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
+ * Since the msg is copied internally it can be allocated on stack.
+ * @nr: Number of elements in @vec.
+ * @data_len: length of data sent to/from server
+ * @sg: Pages to be sent/received to/from server.
+ * @sg_cnt: Number of elements in the @sg
+ *
+ * Return:
+ * 0: Success
+ * <0: Error
+ *
+ * On dir=READ the rtrs client will request a data transfer from the server
+ * to the client. The data that the server responds with will be stored in
+ * @sg when the confirmation callback from @ops is invoked.
+ * On dir=WRITE the rtrs client will RDMA-write the data in @sg to the server side.
+ */
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt_sess *sess;
+
+ enum dma_data_direction dma_dir;
+ int err = -ECONNABORTED, i;
+ size_t usr_len, hdr_len;
+ struct path_it it;
+
+ /* Get kvec length */
+ for (i = 0, usr_len = 0; i < nr; i++)
+ usr_len += vec[i].iov_len;
+
+ if (dir == READ) {
+ hdr_len = sizeof(struct rtrs_msg_rdma_read) +
+ sg_cnt * sizeof(struct rtrs_sg_desc);
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ hdr_len = sizeof(struct rtrs_msg_rdma_write);
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
+ if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
+ rtrs_wrn_rl(sess->clt,
+ "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
+ dir == READ ? "Read" : "Write",
+ usr_len, hdr_len, sess->max_hdr_size);
+ err = -EMSGSIZE;
+ break;
+ }
+ req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ vec, usr_len, sg, sg_cnt, data_len,
+ dma_dir);
+ if (dir == READ)
+ err = rtrs_clt_read_req(req);
+ else
+ err = rtrs_clt_write_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_clt_request);
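
A sketch of how a ULP might issue a synchronous write through this entry point (illustrative only; the permit is assumed to come from the public permit-allocation API in rtrs.h, which is not part of this hunk, and the struct rtrs_clt_req_ops field names are inferred from the ops->conf_fn/ops->priv uses above):

    #include <linux/completion.h>

    static void my_io_conf(void *priv, int errno)
    {
    	/* confirmation callback: errno == 0 on success */
    	complete(priv);
    }

    static int my_sync_write(struct rtrs_clt *clt, struct rtrs_permit *permit,
    			 struct scatterlist *sg, unsigned int sg_cnt,
    			 size_t data_len)
    {
    	DECLARE_COMPLETION_ONSTACK(done);
    	struct rtrs_clt_req_ops ops = {
    		.conf_fn = my_io_conf,		/* field names assumed */
    		.priv	 = &done,
    	};
    	struct kvec vec = { .iov_base = NULL, .iov_len = 0 };	/* no user message */
    	int err;

    	err = rtrs_clt_request(WRITE, &ops, clt, permit, &vec, 1,
    			       data_len, sg, sg_cnt);
    	if (err)
    		return err;
    	wait_for_completion(&done);	/* wait for my_io_conf() */
    	return 0;
    }

Note how the multipath loop above silently fails over to the next CONNECTED path if a path rejects the request, so the caller only sees the final error.
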
+
+/**
+ * rtrs_clt_query() - queries RTRS session attributes
+ * @clt: session pointer
+ * @attr: query results for session attributes.
+ * Returns:
+ * 0 on success
+ * -ECOMM no connection to the server
+ */
+int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+{
+ if (!rtrs_clt_is_connected(clt))
+ return -ECOMM;
+
+ attr->queue_depth = clt->queue_depth;
+ attr->max_io_size = clt->max_io_size;
+ attr->sess_kobj = &clt->dev.kobj;
+ strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+
+ return 0;
+}
+EXPORT_SYMBOL(rtrs_clt_query);
+
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
+ clt->max_segment_size);
+ if (IS_ERR(sess))
+ return PTR_ERR(sess);
+
+ /*
+ * It is totally safe to add path in CONNECTING state: coming
+ * IO will never grab it. Also it is very important to add
+ * path before init, since init fires LINK_CONNECTED event.
+ */
+ rtrs_clt_add_path_to_arr(sess, addr);
+
+ err = init_sess(sess);
+ if (err)
+ goto close_sess;
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err)
+ goto close_sess;
+
+ return 0;
+
+close_sess:
+ rtrs_clt_remove_path_from_arr(sess);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+
+ return err;
+}
+
+static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ if (!(dev->ib_dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ pr_err("Memory registrations not supported.\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_clt_ib_dev_init
+};
+
+static int __init rtrs_client_init(void)
+{
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
+ if (IS_ERR(rtrs_clt_dev_class)) {
+ pr_err("Failed to create rtrs-client dev class\n");
+ return PTR_ERR(rtrs_clt_dev_class);
+ }
+ rtrs_wq = alloc_workqueue("rtrs_client_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ class_destroy(rtrs_clt_dev_class);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit rtrs_client_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_clt_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_client_init);
+module_exit(rtrs_client_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
new file mode 100644
index 000000000000..167acd3c90fc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_CLT_H
+#define RTRS_CLT_H
+
+#include <linux/device.h>
+#include "rtrs-pri.h"
+
+/**
+ * enum rtrs_clt_state - Client states.
+ */
+enum rtrs_clt_state {
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR,
+ RTRS_CLT_RECONNECTING,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_CLOSING,
+ RTRS_CLT_CLOSED,
+ RTRS_CLT_DEAD,
+};
+
+enum rtrs_mp_policy {
+ MP_POLICY_RR,
+ MP_POLICY_MIN_INFLIGHT,
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_reconnects {
+ int successful_cnt;
+ int fail_cnt;
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_cpu_migr {
+ atomic_t from;
+ int to;
+};
+
+/* stats for Read and write operation.
+ * see Documentation/ABI/testing/sysfs-class-rtrs-client for details
+ */
+struct rtrs_clt_stats_rdma {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+
+ u64 failover_cnt;
+};
+
+struct rtrs_clt_stats_pcpu {
+ struct rtrs_clt_stats_cpu_migr cpu_migr;
+ struct rtrs_clt_stats_rdma rdma;
+};
+
+struct rtrs_clt_stats {
+ struct kobject kobj_stats;
+ struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
+ struct rtrs_clt_stats_reconnects reconnects;
+ atomic_t inflight;
+};
+
+struct rtrs_clt_con {
+ struct rtrs_con c;
+ struct rtrs_iu *rsp_ius;
+ u32 queue_size;
+ unsigned int cpu;
+ atomic_t io_cnt;
+ int cm_err;
+};
+
+/**
+ * struct rtrs_permit - permits the memory allocation for a future RDMA operation.
+ *			Combine with irq pinning to keep IO on the same CPU.
+ */
+struct rtrs_permit {
+ enum rtrs_clt_con_type con_type;
+ unsigned int cpu_id;
+ unsigned int mem_id;
+ unsigned int mem_off;
+};
+
+/**
+ * struct rtrs_clt_io_req - describes one inflight IO request
+ */
+struct rtrs_clt_io_req {
+ struct list_head list;
+ struct rtrs_iu *iu;
+ struct scatterlist *sglist; /* list holding user data */
+ unsigned int sg_cnt;
+ unsigned int sg_size;
+ unsigned int data_len;
+ unsigned int usr_len;
+ void *priv;
+ bool in_use;
+ struct rtrs_clt_con *con;
+ struct rtrs_sg_desc *desc;
+ struct ib_sge *sge;
+ struct rtrs_permit *permit;
+ enum dma_data_direction dir;
+ void (*conf)(void *priv, int errno);
+ unsigned long start_jiffies;
+
+ struct ib_mr *mr;
+ struct ib_cqe inv_cqe;
+ struct completion inv_comp;
+ int inv_errno;
+ bool need_inv_comp;
+ bool need_inv;
+};
+
+struct rtrs_rbuf {
+ u64 addr;
+ u32 rkey;
+};
+
+struct rtrs_clt_sess {
+ struct rtrs_sess s;
+ struct rtrs_clt *clt;
+ wait_queue_head_t state_wq;
+ enum rtrs_clt_state state;
+ atomic_t connected_cnt;
+ struct mutex init_mutex;
+ struct rtrs_clt_io_req *reqs;
+ struct delayed_work reconnect_dwork;
+ struct work_struct close_work;
+ unsigned int reconnect_attempts;
+ bool established;
+ struct rtrs_rbuf *rbufs;
+ size_t max_io_size;
+ u32 max_hdr_size;
+ u32 chunk_size;
+ size_t queue_depth;
+ u32 max_pages_per_mr;
+ int max_send_sge;
+ u32 flags;
+ struct kobject kobj;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+ char hca_name[IB_DEVICE_NAME_MAX];
+ struct list_head __percpu
+ *mp_skip_entry;
+};
+
+struct rtrs_clt {
+ struct list_head paths_list; /* rcu protected list */
+ size_t paths_num;
+ struct rtrs_clt_sess
+ __rcu * __percpu *pcpu_path;
+ uuid_t paths_uuid;
+ int paths_up;
+ struct mutex paths_mutex;
+ struct mutex paths_ev_mutex;
+ char sessname[NAME_MAX];
+ u16 port;
+ unsigned int max_reconnect_attempts;
+ unsigned int reconnect_delay_sec;
+ unsigned int max_segments;
+ size_t max_segment_size;
+ void *permits;
+ unsigned long *permits_map;
+ size_t queue_depth;
+ size_t max_io_size;
+ wait_queue_head_t permits_wait;
+ size_t pdu_sz;
+ void *priv;
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev);
+ struct device dev;
+ struct kobject *kobj_paths;
+ enum rtrs_mp_policy mp_policy;
+};
+
+static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_clt_con, c);
+}
+
+static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_clt_sess, s);
+}
+
+static inline int permit_size(struct rtrs_clt *clt)
+{
+ return sizeof(struct rtrs_permit) + clt->pdu_sz;
+}
+
+static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+{
+ return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
+}
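
The two helpers above imply that each permit slot is sizeof(struct rtrs_permit) plus the ULP's pdu_sz, i.e. the ULP payload sits directly behind the permit structure. A hypothetical accessor illustrating that layout (the real public helper lives in rtrs.h and may differ):

    /* Hypothetical illustration of the permit layout: the ULP PDU follows the permit */
    static inline void *permit_to_pdu(struct rtrs_permit *permit)
    {
    	return permit + 1;	/* first byte of the pdu_sz area */
    }
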
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr);
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
+void free_sess(struct rtrs_clt_sess *sess);
+
+/* rtrs-clt-stats.c */
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
+
+int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
+ bool enable);
+ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+
+/* rtrs-clt-sysfs.c */
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-log.h b/drivers/infiniband/ulp/rtrs/rtrs-log.h
new file mode 100644
index 000000000000..53c785b992f2
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_LOG_H
+#define RTRS_LOG_H
+
+#define rtrs_log(fn, obj, fmt, ...) \
+ fn("<%s>: " fmt, obj->sessname, ##__VA_ARGS__)
+
+#define rtrs_err(obj, fmt, ...) \
+ rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)
+#define rtrs_err_rl(obj, fmt, ...) \
+ rtrs_log(pr_err_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn(obj, fmt, ...) \
+ rtrs_log(pr_warn, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn_rl(obj, fmt, ...) \
+ rtrs_log(pr_warn_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info(obj, fmt, ...) \
+ rtrs_log(pr_info, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info_rl(obj, fmt, ...) \
+ rtrs_log(pr_info_ratelimited, obj, fmt, ##__VA_ARGS__)
+
+#endif /* RTRS_LOG_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
new file mode 100644
index 000000000000..0a93c87ef92b
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_PRI_H
+#define RTRS_PRI_H
+
+#include <linux/uuid.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib.h>
+
+#include "rtrs.h"
+
+#define RTRS_PROTO_VER_MAJOR 2
+#define RTRS_PROTO_VER_MINOR 0
+
+#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
+ __stringify(RTRS_PROTO_VER_MINOR)
+
+enum rtrs_imm_const {
+ MAX_IMM_TYPE_BITS = 4,
+ MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
+ MAX_IMM_PAYL_BITS = 28,
+ MAX_IMM_PAYL_MASK = ((1 << MAX_IMM_PAYL_BITS) - 1),
+};
+
+enum rtrs_imm_type {
+ RTRS_IO_REQ_IMM = 0, /* client to server */
+ RTRS_IO_RSP_IMM = 1, /* server to client */
+ RTRS_IO_RSP_W_INV_IMM = 2, /* server to client */
+
+ RTRS_HB_MSG_IMM = 8, /* HB: HeartBeat */
+ RTRS_HB_ACK_IMM = 9,
+
+ RTRS_LAST_IMM,
+};
+
+enum {
+ SERVICE_CON_QUEUE_DEPTH = 512,
+
+ MAX_PATHS_NUM = 128,
+
+ /*
+ * With the size of struct rtrs_permit allocated on the client, 4K
+ * is the maximum number of rtrs_permits we can allocate. This number is
+ * also used on the client to allocate the IU for the user connection
+ * to receive the RDMA addresses from the server.
+ */
+ MAX_SESS_QUEUE_DEPTH = 4096,
+
+ RTRS_HB_INTERVAL_MS = 5000,
+ RTRS_HB_MISSED_MAX = 5,
+
+ RTRS_MAGIC = 0x1BBD,
+ RTRS_PROTO_VER = (RTRS_PROTO_VER_MAJOR << 8) | RTRS_PROTO_VER_MINOR,
+};
+
+struct rtrs_ib_dev;
+
+struct rtrs_rdma_dev_pd_ops {
+ struct rtrs_ib_dev *(*alloc)(void);
+ void (*free)(struct rtrs_ib_dev *dev);
+ int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
+};
+
+struct rtrs_rdma_dev_pd {
+ struct mutex mutex;
+ struct list_head list;
+ enum ib_pd_flags pd_flags;
+ const struct rtrs_rdma_dev_pd_ops *ops;
+};
+
+struct rtrs_ib_dev {
+ struct ib_device *ib_dev;
+ struct ib_pd *ib_pd;
+ struct kref ref;
+ struct list_head entry;
+ struct rtrs_rdma_dev_pd *pool;
+};
+
+struct rtrs_con {
+ struct rtrs_sess *sess;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct rdma_cm_id *cm_id;
+ unsigned int cid;
+};
+
+struct rtrs_sess {
+ struct list_head entry;
+ struct sockaddr_storage dst_addr;
+ struct sockaddr_storage src_addr;
+ char sessname[NAME_MAX];
+ uuid_t uuid;
+ struct rtrs_con **con;
+ unsigned int con_num;
+ unsigned int recon_cnt;
+ struct rtrs_ib_dev *dev;
+ int dev_ref;
+ struct ib_cqe *hb_cqe;
+ void (*hb_err_handler)(struct rtrs_con *con);
+ struct workqueue_struct *hb_wq;
+ struct delayed_work hb_dwork;
+ unsigned int hb_interval_ms;
+ unsigned int hb_missed_cnt;
+ unsigned int hb_missed_max;
+};
+
+/* rtrs information unit */
+struct rtrs_iu {
+ struct list_head list;
+ struct ib_cqe cqe;
+ dma_addr_t dma_addr;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+/**
+ * enum rtrs_msg_types - RTRS message types, see also rtrs/README
+ * @RTRS_MSG_INFO_REQ: Client additional info request to the server
+ * @RTRS_MSG_INFO_RSP: Server additional info response to the client
+ * @RTRS_MSG_WRITE: Client writes data per RDMA to server
+ * @RTRS_MSG_READ: Client requests data transfer from server
+ * @RTRS_MSG_RKEY_RSP: Server refreshed rkey for rbuf
+ */
+enum rtrs_msg_types {
+ RTRS_MSG_INFO_REQ,
+ RTRS_MSG_INFO_RSP,
+ RTRS_MSG_WRITE,
+ RTRS_MSG_READ,
+ RTRS_MSG_RKEY_RSP,
+};
+
+/**
+ * enum rtrs_msg_flags - RTRS message flags.
+ * @RTRS_MSG_NEED_INVAL_F: Send invalidation in response.
+ * @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
+ */
+enum rtrs_msg_flags {
+ RTRS_MSG_NEED_INVAL_F = 1 << 0,
+ RTRS_MSG_NEW_RKEY_F = 1 << 1,
+};
+
+/**
+ * struct rtrs_sg_desc - RDMA-Buffer entry description
+ * @addr: Address of RDMA destination buffer
+ * @key: Authorization rkey to write to the buffer
+ * @len: Size of the buffer
+ */
+struct rtrs_sg_desc {
+ __le64 addr;
+ __le32 key;
+ __le32 len;
+};
+
+/**
+ * struct rtrs_msg_conn_req - Client connection request to the server
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @cid: Current connection id
+ * @cid_num: Number of connections per session
+ * @recon_cnt: Reconnections counter
+ * @sess_uuid: UUID of a session (path)
+ * @paths_uuid: UUID of a group of sessions (paths)
+ *
+ * NOTE: max size 56 bytes, see man rdma_connect().
+ */
+struct rtrs_msg_conn_req {
+ /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
+ * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ */
+ u8 __cma_version;
+	/* On the sender side this should be set to 0, or cma_save_ip_info()
+	 * extracts garbage and will fail.
+ */
+ u8 __ip_version;
+ __le16 magic;
+ __le16 version;
+ __le16 cid;
+ __le16 cid_num;
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+ u8 reserved[12];
+};
+
+/**
+ * struct rtrs_msg_conn_rsp - Server connection response to the client
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @errno: If rdma_accept() then 0, if rdma_reject() indicates error
+ * @queue_depth: max inflight messages (queue-depth) in this session
+ * @max_io_size: max io size server supports
+ * @max_hdr_size: max msg header size server supports
+ *
+ * NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
+ */
+struct rtrs_msg_conn_rsp {
+ __le16 magic;
+ __le16 version;
+ __le16 errno;
+ __le16 queue_depth;
+ __le32 max_io_size;
+ __le32 max_hdr_size;
+ __le32 flags;
+ u8 reserved[36];
+};
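
Since the two handshake structures above must fit into the CM private-data limits documented in their NOTEs, a compile-time check is a natural companion (illustrative editorial addition, not in the patch):

    #include <linux/build_bug.h>

    /* Illustrative only: keep the CM private data within the documented limits */
    static_assert(sizeof(struct rtrs_msg_conn_req) <= 56);
    static_assert(sizeof(struct rtrs_msg_conn_rsp) == 56);
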
+
+/**
+ * struct rtrs_msg_info_req
+ * @type: @RTRS_MSG_INFO_REQ
+ * @sessname: Session name chosen by client
+ */
+struct rtrs_msg_info_req {
+ __le16 type;
+ u8 sessname[NAME_MAX];
+ u8 reserved[15];
+};
+
+/**
+ * struct rtrs_msg_info_rsp
+ * @type: @RTRS_MSG_INFO_RSP
+ * @sg_cnt: Number of @desc entries
+ * @desc: RDMA buffers where the client can write to server
+ */
+struct rtrs_msg_info_rsp {
+ __le16 type;
+ __le16 sg_cnt;
+ u8 reserved[4];
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rkey_rsp
+ * @type: @RTRS_MSG_RKEY_RSP
+ * @buf_id: RDMA buf_id of the new rkey
+ * @rkey: new remote key for RDMA buffers id from server
+ */
+struct rtrs_msg_rkey_rsp {
+ __le16 type;
+ __le16 buf_id;
+ __le32 rkey;
+};
+
+/**
+ * struct rtrs_msg_rdma_read - RDMA data transfer request from client
+ * @type: always @RTRS_MSG_READ
+ * @usr_len: length of user payload
+ * @sg_cnt: number of @desc entries
+ * @desc: RDMA buffers where the server can write the result to
+ */
+struct rtrs_msg_rdma_read {
+ __le16 type;
+ __le16 usr_len;
+ __le16 flags;
+ __le16 sg_cnt;
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
+ * @type: always @RTRS_MSG_WRITE
+ * @usr_len: length of user payload
+ */
+struct rtrs_msg_rdma_write {
+ __le16 type;
+ __le16 usr_len;
+};
+
+/**
+ * struct rtrs_msg_rdma_hdr - header for a read or write request
+ * @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
+ */
+struct rtrs_msg_rdma_hdr {
+ __le16 type;
+};
+
+/* rtrs.c */
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
+ struct ib_device *dev, enum dma_data_direction,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc));
+void rtrs_iu_free(struct rtrs_iu *iu, enum dma_data_direction dir,
+ struct ib_device *dev, u32 queue_size);
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head);
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx);
+void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq);
+void rtrs_start_hb(struct rtrs_sess *sess);
+void rtrs_stop_hb(struct rtrs_sess *sess);
+void rtrs_send_hb_ack(struct rtrs_sess *sess);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool);
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool);
+
+struct rtrs_ib_dev *rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool);
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev);
+
+static inline u32 rtrs_to_imm(u32 type, u32 payload)
+{
+ BUILD_BUG_ON(MAX_IMM_PAYL_BITS + MAX_IMM_TYPE_BITS != 32);
+ BUILD_BUG_ON(RTRS_LAST_IMM > (1<<MAX_IMM_TYPE_BITS));
+ return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
+ (payload & MAX_IMM_PAYL_MASK);
+}
+
+static inline void rtrs_from_imm(u32 imm, u32 *type, u32 *payload)
+{
+ *payload = imm & MAX_IMM_PAYL_MASK;
+ *type = imm >> MAX_IMM_PAYL_BITS;
+}
+
+static inline u32 rtrs_to_io_req_imm(u32 addr)
+{
+ return rtrs_to_imm(RTRS_IO_REQ_IMM, addr);
+}
+
+static inline u32 rtrs_to_io_rsp_imm(u32 msg_id, int errno, bool w_inval)
+{
+ enum rtrs_imm_type type;
+ u32 payload;
+
+ /* 9 bits for errno, 19 bits for msg_id */
+ payload = (abs(errno) & 0x1ff) << 19 | (msg_id & 0x7ffff);
+ type = w_inval ? RTRS_IO_RSP_W_INV_IMM : RTRS_IO_RSP_IMM;
+
+ return rtrs_to_imm(type, payload);
+}
+
+static inline void rtrs_from_io_rsp_imm(u32 payload, u32 *msg_id, int *errno)
+{
+ /* 9 bits for errno, 19 bits for msg_id */
+ *msg_id = payload & 0x7ffff;
+ *errno = -(int)((payload >> 19) & 0x1ff);
+}
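
To make the bit layout concrete, here is a round-trip through the helpers above (illustrative, with made-up values): the 32-bit immediate carries a 4-bit type and a 28-bit payload, and for IO responses the payload packs 9 bits of errno and 19 bits of msg_id.

    /* Illustrative only: encode/decode round-trip of an IO response immediate */
    static void imm_roundtrip_example(void)
    {
    	u32 imm, type, payload, msg_id;
    	int errno;

    	imm = rtrs_to_io_rsp_imm(42 /* msg_id */, -EIO, true /* w_inval */);
    	rtrs_from_imm(imm, &type, &payload);	/* type == RTRS_IO_RSP_W_INV_IMM */
    	rtrs_from_io_rsp_imm(payload, &msg_id, &errno);
    	WARN_ON(msg_id != 42 || errno != -EIO);
    }
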
+
+#define STAT_STORE_FUNC(type, set_value, reset) \
+static ssize_t set_value##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret = -EINVAL; \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ if (sysfs_streq(buf, "1")) \
+ ret = reset(stats, true); \
+ else if (sysfs_streq(buf, "0")) \
+ ret = reset(stats, false); \
+ if (ret) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define STAT_SHOW_FUNC(type, get_value, print) \
+static ssize_t get_value##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ return print(stats, page, PAGE_SIZE); \
+}
+
+#define STAT_ATTR(type, stat, print, reset) \
+STAT_STORE_FUNC(type, stat, reset) \
+STAT_SHOW_FUNC(type, stat, print) \
+static struct kobj_attribute stat##_attr = __ATTR_RW(stat)
+
+#endif /* RTRS_PRI_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
new file mode 100644
index 000000000000..e102b1368d0c
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-srv.h"
+
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
+{
+ if (enable) {
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+
+ memset(r, 0, sizeof(*r));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ struct rtrs_srv_sess *sess = stats->sess;
+
+ return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
+ (s64)atomic64_read(&r->dir[READ].cnt),
+ (s64)atomic64_read(&r->dir[READ].size_total),
+ (s64)atomic64_read(&r->dir[WRITE].cnt),
+ (s64)atomic64_read(&r->dir[WRITE].size_total),
+ atomic_read(&sess->ids_inflight));
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
new file mode 100644
index 000000000000..3d7877534bcc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+
+static void rtrs_srv_release(struct kobject *kobj)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ kfree(sess);
+}
+
+static struct kobj_type ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_release,
+};
+
+static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_sess *s;
+ char str[MAXHOSTNAMELEN];
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ s = &sess->s;
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(s, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
+ close_sess(sess);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_srv_disconnect_attr =
+ __ATTR(disconnect, 0644,
+ rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
+
+static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_con *usr_con;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+ usr_con = sess->s.con[0];
+
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ usr_con->cm_id->port_num);
+}
+
+static struct kobj_attribute rtrs_srv_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
+
+static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ sess->s.dev->ib_dev->name);
+}
+
+static struct kobj_attribute rtrs_srv_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
+
+static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
+
+static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
+
+static struct attribute *rtrs_srv_sess_attrs[] = {
+ &rtrs_srv_hca_name_attr.attr,
+ &rtrs_srv_hca_port_attr.attr,
+ &rtrs_srv_src_addr_attr.attr,
+ &rtrs_srv_dst_addr_attr.attr,
+ &rtrs_srv_disconnect_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_sess_attr_group = {
+ .attrs = rtrs_srv_sess_attrs,
+};
+
+STAT_ATTR(struct rtrs_srv_stats, rdma,
+ rtrs_srv_stats_rdma_to_str,
+ rtrs_srv_reset_rdma_stats);
+
+static struct attribute *rtrs_srv_stats_attrs[] = {
+ &rdma_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_stats_attr_group = {
+ .attrs = rtrs_srv_stats_attrs,
+};
+
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+ kfree(srv);
+}
+
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int err = 0;
+
+ mutex_lock(&srv->paths_mutex);
+ if (srv->dev_ref++) {
+ /*
+ * Device needs to be registered only on the first session
+ */
+ goto unlock;
+ }
+ srv->dev.class = rtrs_dev_class;
+ srv->dev.release = rtrs_srv_dev_release;
+ err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ if (err)
+ goto unlock;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&srv->dev, true);
+ err = device_register(&srv->dev);
+ if (err) {
+ pr_err("device_register(): %d\n", err);
+ goto put;
+ }
+ srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
+ if (!srv->kobj_paths) {
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_unregister(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+ kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
+ goto unlock;
+
+put:
+ put_device(&srv->dev);
+unlock:
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static void
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ mutex_lock(&srv->paths_mutex);
+ if (!--srv->dev_ref) {
+ kobject_del(srv->kobj_paths);
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_unregister(&srv->dev);
+ } else {
+ mutex_unlock(&srv->paths_mutex);
+ }
+}
+
+static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_srv_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_sess_stats_release,
+};
+
+static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+{
+ int err;
+ struct rtrs_sess *s = &sess->s;
+
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+
+ return err;
+}
+
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ if (err)
+ return err;
+
+ err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ "%s", str);
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ goto destroy_root;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = rtrs_srv_create_stats_files(sess);
+ if (err)
+ goto remove_group;
+
+ return 0;
+
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+destroy_root:
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+
+ return err;
+}
+
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ }
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
new file mode 100644
index 000000000000..0d9241f5d9e6
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -0,0 +1,2178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+#include <rdma/ib_cm.h>
+
+MODULE_DESCRIPTION("RDMA Transport Server");
+MODULE_LICENSE("GPL");
+
+/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
+#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
+#define DEFAULT_SESS_QUEUE_DEPTH 512
+#define MAX_HDR_SIZE PAGE_SIZE
+
+/* We guarantee to serve at least 10 paths */
+#define CHUNK_POOL_SZ 10
+
+static struct rtrs_rdma_dev_pd dev_pd;
+static mempool_t *chunk_pool;
+struct class *rtrs_dev_class;
+
+static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+
+static bool always_invalidate = true;
+module_param(always_invalidate, bool, 0444);
+MODULE_PARM_DESC(always_invalidate,
+ "Invalidate memory registration for contiguous memory regions before accessing.");
+
+module_param_named(max_chunk_size, max_chunk_size, int, 0444);
+MODULE_PARM_DESC(max_chunk_size,
+		 "Max size in bytes for each IO request (default: "
+		 __stringify(DEFAULT_MAX_CHUNK_SIZE) " bytes)");
+
+module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
+MODULE_PARM_DESC(sess_queue_depth,
+ "Number of buffers for pending I/O requests to allocate per session. Maximum: "
+ __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
+ __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
+
+static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
+
+static struct workqueue_struct *rtrs_wq;
+
+static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_srv_con, c);
+}
+
+static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_srv_sess, s);
+}
+
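+/*
+ * Valid state transitions (everything else is rejected):
+ *   CONNECTING -> CONNECTED, CONNECTING|CONNECTED -> CLOSING,
+ *   CLOSING -> CLOSED.
+ */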
+static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_lock);
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSING:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ case RTRS_SRV_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSED:
+ switch (old_state) {
+ case RTRS_SRV_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed)
+ sess->state = new_state;
+
+ return changed;
+}
+
+static bool rtrs_srv_change_state_get_old(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state,
+ enum rtrs_srv_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_lock);
+ *old_state = sess->state;
+ changed = __rtrs_srv_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_lock);
+
+ return changed;
+}
+
+static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+
+ return rtrs_srv_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void free_id(struct rtrs_srv_op *id)
+{
+ if (!id)
+ return;
+ kfree(id);
+}
+
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int i;
+
+ WARN_ON(atomic_read(&sess->ids_inflight));
+ if (sess->ops_ids) {
+ for (i = 0; i < srv->queue_depth; i++)
+ free_id(sess->ops_ids[i]);
+ kfree(sess->ops_ids);
+ sess->ops_ids = NULL;
+ }
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_srv_rdma_done
+};
+
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_op *id;
+ int i;
+
+ sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
+ GFP_KERNEL);
+ if (!sess->ops_ids)
+ goto err;
+
+ for (i = 0; i < srv->queue_depth; ++i) {
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ goto err;
+
+ sess->ops_ids[i] = id;
+ }
+ init_waitqueue_head(&sess->ids_waitq);
+ atomic_set(&sess->ids_inflight, 0);
+
+ return 0;
+
+err:
+ rtrs_srv_free_ops_ids(sess);
+ return -ENOMEM;
+}
+
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+{
+ atomic_inc(&sess->ids_inflight);
+}
+
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+{
+ if (atomic_dec_and_test(&sess->ids_inflight))
+ wake_up(&sess->ids_waitq);
+}
+
+static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
+{
+ wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
+}
+
+
+static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "REG MR failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+}
+
+static struct ib_cqe local_reg_cqe = {
+ .done = rtrs_srv_reg_mr_done
+};
+
+static int rdma_write_sg(struct rtrs_srv_op *id)
+{
+ struct rtrs_sess *s = id->con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct rtrs_srv *srv = sess->srv;
+ struct ib_send_wr inv_wr, imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+ int err, offset;
+ bool need_inval;
+ u32 rkey = 0;
+ struct ib_reg_wr rwr;
+ struct ib_sge *plist;
+ struct ib_sge list;
+
+ sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
+ need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
+ if (unlikely(sg_cnt != 1))
+ return -EINVAL;
+
+ offset = 0;
+
+ wr = &id->tx_wr;
+ plist = &id->tx_sg;
+ plist->addr = dma_addr + offset;
+ plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
+
+ /* WR will fail with length error
+ * if this is 0
+ */
+ if (unlikely(plist->length == 0)) {
+ rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
+ return -EINVAL;
+ }
+
+ plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ offset += plist->length;
+
+ wr->wr.sg_list = plist;
+ wr->wr.num_sge = 1;
+ wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
+ wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
+ if (rkey == 0)
+ rkey = wr->rkey;
+ else
+ /* Only one key is actually used */
+ WARN_ON_ONCE(rkey != wr->rkey);
+
+ wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.ex.imm_data = 0;
+ wr->wr.send_flags = 0;
+
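+	/*
+	 * Build the work request chain: the RDMA write first, then an
+	 * optional REG_MR and/or SEND_WITH_INV, terminated by the *_WITH_IMM
+	 * request that signals completion of this msg_id to the client.
+	 */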
+ if (need_inval && always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr->wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr->wr.next = &imm_wr;
+ }
+	/*
+	 * From time to time we have to post signaled sends,
+	 * or the send queue will fill up and only a QP reset can help.
+	 */
+ flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+
+ if (need_inval) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ inv_wr.ex.invalidate_rkey = rkey;
+ }
+
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.wr.send_flags = 0;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 0, need_inval));
+
+ imm_wr.wr_cqe = &io_comp_cqe;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ offset, DMA_BIDIRECTIONAL);
+
+ err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
+ if (unlikely(err))
+ rtrs_err(s,
+ "Posting RDMA-Write-Request to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
+ * requests or on successful WRITE request.
+ * @con: the connection to send back result
+ * @id: the id associated with the IO
+ * @errno: the error number of the IO.
+ *
+ * Return 0 on success, errno otherwise.
+ */
+static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ int errno)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
+ struct ib_reg_wr rwr;
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
+ u32 imm;
+ int err;
+
+ if (id->dir == READ) {
+ struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
+ size_t sg_cnt;
+
+ need_inval = le16_to_cpu(rd_msg->flags) &
+ RTRS_MSG_NEED_INVAL_F;
+ sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
+
+ if (need_inval) {
+ if (likely(sg_cnt)) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ /* Only one key is actually used */
+ inv_wr.ex.invalidate_rkey =
+ le32_to_cpu(rd_msg->desc[0].key);
+ } else {
+ WARN_ON_ONCE(1);
+ need_inval = false;
+ }
+ }
+ }
+
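+	/*
+	 * Build the response chain: an optional SEND_WITH_INV and/or REG_MR
+	 * first, terminated by the *_WITH_IMM request that carries the
+	 * encoded msg_id/errno back to the client.
+	 */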
+ if (need_inval && always_invalidate) {
+ wr = &inv_wr;
+ inv_wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr = &imm_wr;
+ }
+	/*
+	 * From time to time we have to post signaled sends,
+	 * or the send queue will fill up and only a QP reset can help.
+	 */
+ flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.next = &imm_wr;
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.wr.send_flags = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.wr_cqe = &io_comp_cqe;
+
+ imm_wr.ex.imm_data = cpu_to_be32(imm);
+
+ err = ib_post_send(id->con->c.qp, wr, NULL);
+ if (unlikely(err))
+ rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+void close_sess(struct rtrs_srv_sess *sess)
+{
+ enum rtrs_srv_state old_state;
+
+ if (rtrs_srv_change_state_get_old(sess, RTRS_SRV_CLOSING,
+ &old_state))
+ queue_work(rtrs_wq, &sess->close_work);
+ WARN_ON(sess->state != RTRS_SRV_CLOSING);
+}
+
+static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
+{
+ switch (state) {
+ case RTRS_SRV_CONNECTING:
+ return "RTRS_SRV_CONNECTING";
+ case RTRS_SRV_CONNECTED:
+ return "RTRS_SRV_CONNECTED";
+ case RTRS_SRV_CLOSING:
+ return "RTRS_SRV_CLOSING";
+ case RTRS_SRV_CLOSED:
+ return "RTRS_SRV_CLOSED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * rtrs_srv_resp_rdma() - Finish an RDMA request
+ *
+ * @id:		Internal RTRS operation identifier
+ * @status:	Response code sent to the other side for this operation.
+ *		0 = success, < 0 error
+ * Context: any
+ *
+ * Finish an RDMA operation. A message is sent to the client and the
+ * corresponding memory areas will be released.
+ *
+ * Return: false if the response could not be posted because the send queue
+ * is full and the caller should retry later, true otherwise.
+ */
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ struct rtrs_sess *s;
+ int err;
+
+ if (WARN_ON(!id))
+ return true;
+
+ con = id->con;
+ s = con->c.sess;
+ sess = to_srv_sess(s);
+
+ id->status = status;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Sending I/O response failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ goto out;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+
+ ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
+ }
+ if (unlikely(atomic_sub_return(1,
+ &con->sq_wr_avail) < 0)) {
+ pr_err("IB send queue full\n");
+ atomic_add(1, &con->sq_wr_avail);
+ spin_lock(&con->rsp_wr_wait_lock);
+ list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
+ spin_unlock(&con->rsp_wr_wait_lock);
+ return false;
+ }
+
+ if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
+ err = send_io_resp_imm(con, id, status);
+ else
+ err = rdma_write_sg(id);
+
+ if (unlikely(err)) {
+ rtrs_err_rl(s, "IO response failed: %d\n", err);
+ close_sess(sess);
+ }
+out:
+ rtrs_srv_put_ops_ids(sess);
+ return true;
+}
+EXPORT_SYMBOL(rtrs_srv_resp_rdma);
+
+/**
+ * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
+ * @srv: Session pointer
+ * @priv: The private pointer that is associated with the session.
+ */
+void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+{
+ srv->priv = priv;
+}
+EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
+
+static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->mrs_num; i++) {
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &sess->mrs[i];
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&srv_mr->sgt);
+ }
+ kfree(sess->mrs);
+}
+
+static int map_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *ss = &sess->s;
+ int i, mri, err, mrs_num;
+ unsigned int chunk_bits;
+ int chunks_per_mr = 1;
+
+	/*
+	 * Here we map queue_depth chunks to MRs. First we have to figure
+	 * out how many chunks we can map per MR.
+	 */
+ if (always_invalidate) {
+		/*
+		 * In order to invalidate each chunk of memory individually,
+		 * we need one memory region per chunk.
+		 */
+ mrs_num = srv->queue_depth;
+ } else {
+ chunks_per_mr =
+ sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
+ chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
+ }
+
+ sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
+ if (!sess->mrs)
+ return -ENOMEM;
+
+ sess->mrs_num = mrs_num;
+
+ for (mri = 0; mri < mrs_num; mri++) {
+ struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct sg_table *sgt = &srv_mr->sgt;
+ struct scatterlist *s;
+ struct ib_mr *mr;
+ int nr, chunks;
+
+ chunks = chunks_per_mr * mri;
+ if (!always_invalidate)
+ chunks_per_mr = min_t(int, chunks_per_mr,
+ srv->queue_depth - chunks);
+
+ err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
+ if (err)
+ goto err;
+
+ for_each_sg(sgt->sgl, s, chunks_per_mr, i)
+ sg_set_page(s, srv->chunks[chunks + i],
+ max_chunk_size, 0);
+
+ nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+ if (nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto free_sg;
+ }
+ mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sgt->nents);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto unmap_sg;
+ }
+ nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ NULL, max_chunk_size);
+ if (nr < 0 || nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto dereg_mr;
+ }
+
+ if (always_invalidate) {
+ srv_mr->iu = rtrs_iu_alloc(1,
+ sizeof(struct rtrs_msg_rkey_rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_rdma_done);
+ if (!srv_mr->iu) {
+ err = -ENOMEM;
+ rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+ goto free_iu;
+ }
+ }
+ /* Eventually dma addr for each chunk can be cached */
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ sess->dma_addr[chunks + i] = sg_dma_address(s);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+ srv_mr->mr = mr;
+
+ continue;
+err:
+ while (mri--) {
+ srv_mr = &sess->mrs[mri];
+ sgt = &srv_mr->sgt;
+ mr = srv_mr->mr;
+free_iu:
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+dereg_mr:
+ ib_dereg_mr(mr);
+unmap_sg:
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+free_sg:
+ sg_free_table(sgt);
+ }
+ kfree(sess->mrs);
+
+ return err;
+ }
+
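+	/*
+	 * The IO immediate payload packs the buffer (msg) id into the upper
+	 * chunk_bits and the offset within the chunk into the remaining low
+	 * mem_bits, see rtrs_srv_rdma_done(). E.g. with the default queue
+	 * depth of 512, chunk_bits is ilog2(511) + 1 = 9.
+	 */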
+ chunk_bits = ilog2(srv->queue_depth - 1) + 1;
+ sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
+
+ return 0;
+}
+
+static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
+{
+ close_sess(to_srv_sess(c->sess));
+}
+
+static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_srv_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info response send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+ WARN_ON(wc->opcode != IB_WC_SEND);
+}
+
+static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ int up;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+}
+
+static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&srv->paths_ev_mutex);
+ WARN_ON(!srv->paths_up);
+ if (--srv->paths_up == 0)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
+ mutex_unlock(&srv->paths_ev_mutex);
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess);
+
+static int process_info_req(struct rtrs_srv_con *con,
+ struct rtrs_msg_info_req *msg)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr *reg_wr = NULL;
+ struct rtrs_msg_info_rsp *rsp;
+ struct rtrs_iu *tx_iu;
+ struct ib_reg_wr *rwr;
+ int mri, err;
+ size_t tx_sz;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ return err;
+ }
+ rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ if (unlikely(!rwr))
+ return -ENOMEM;
+ strlcpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+
+ tx_sz = sizeof(*rsp);
+ tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
+ if (unlikely(!tx_iu)) {
+ err = -ENOMEM;
+ goto rwr_free;
+ }
+
+ rsp = tx_iu->buf;
+ rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
+ rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+
+ for (mri = 0; mri < sess->mrs_num; mri++) {
+ struct ib_mr *mr = sess->mrs[mri].mr;
+
+ rsp->desc[mri].addr = cpu_to_le64(mr->iova);
+ rsp->desc[mri].key = cpu_to_le32(mr->rkey);
+ rsp->desc[mri].len = cpu_to_le32(mr->length);
+
+ /*
+ * Fill in reg MR request and chain them *backwards*
+ */
+ rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
+ rwr[mri].wr.opcode = IB_WR_REG_MR;
+ rwr[mri].wr.wr_cqe = &local_reg_cqe;
+ rwr[mri].wr.num_sge = 0;
+ rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
+ rwr[mri].mr = mr;
+ rwr[mri].key = mr->rkey;
+ rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ reg_wr = &rwr[mri].wr;
+ }
+
+ err = rtrs_srv_create_sess_files(sess);
+ if (unlikely(err))
+ goto iu_free;
+ kobject_get(&sess->kobj);
+ get_device(&sess->srv->dev);
+ rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(sess);
+
+	/*
+	 * We do not account for the number of established connections at this
+	 * point; we rely on the client, which should send the info request
+	 * only after all connections have been successfully established.
+	 * Thus, simply notify the listener with the proper event if we are
+	 * the first path.
+	 */
+ rtrs_srv_sess_up(sess);
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info response */
+ err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
+iu_free:
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ }
+rwr_free:
+ kfree(rwr);
+
+ return err;
+}
+
+static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *iu;
+ int err;
+
+ WARN_ON(con->c.cid);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info request receive failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto close;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(s, "Sess info request is malformed: size %d\n",
+ wc->byte_len);
+ goto close;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) {
+ rtrs_err(s, "Sess info request is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto close;
+ }
+ err = process_info_req(con, msg);
+ if (unlikely(err))
+ goto close;
+
+out:
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return;
+close:
+ close_sess(sess);
+ goto out;
+}
+
+static int post_recv_info_req(struct rtrs_srv_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *rx_iu;
+ int err;
+
+ rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_srv_info_req_done);
+ if (unlikely(!rx_iu))
+ return -ENOMEM;
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
+{
+ int i, err;
+
+ for (i = 0; i < q_size; i++) {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ size_t q_size;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
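+		/* cid 0 is the service connection (info and heartbeat messages) */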
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = srv->queue_depth;
+
+ err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void process_read(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_read *msg,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t usr_len, data_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ if (unlikely(msg->sg_cnt != 1 && msg->sg_cnt != 0)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, invalid message\n");
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, READ);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = READ;
+ id->msg_id = buf_id;
+ id->rd_msg = msg;
+ usr_len = le16_to_cpu(msg->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, READ, data, data_len,
+ data + data_len, usr_len);
+
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
+ buf_id, ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_write(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_write *req,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t data_len, usr_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = WRITE;
+ id->msg_id = buf_id;
+
+ usr_len = le16_to_cpu(req->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, WRITE, data, data_len,
+ data + data_len, usr_len);
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, user module callback reports err: %d\n",
+ ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_io_req(struct rtrs_srv_con *con, void *msg,
+ u32 id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_rdma_hdr *hdr;
+ unsigned int type;
+
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ max_chunk_size, DMA_BIDIRECTIONAL);
+ hdr = msg;
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RTRS_MSG_WRITE:
+ process_write(con, msg, id, off);
+ break;
+ case RTRS_MSG_READ:
+ process_read(con, msg, id, off);
+ break;
+ default:
+ rtrs_err(s,
+ "Processing I/O request failed, unknown message type received: 0x%02x\n",
+ type);
+ goto err;
+ }
+
+ return;
+
+err:
+ close_sess(sess);
+}
+
+static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_mr *mr =
+ container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 msg_id, off;
+ void *data;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ }
+ msg_id = mr->msg_id;
+ off = mr->msg_off;
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+}
+
+static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
+ struct rtrs_srv_mr *mr)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &mr->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = mr->mr->rkey,
+ };
+ mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
+{
+ spin_lock(&con->rsp_wr_wait_lock);
+ while (!list_empty(&con->rsp_wr_wait_list)) {
+ struct rtrs_srv_op *id;
+ int ret;
+
+ id = list_entry(con->rsp_wr_wait_list.next,
+ struct rtrs_srv_op, wait_list);
+ list_del(&id->wait_list);
+
+ spin_unlock(&con->rsp_wr_wait_lock);
+ ret = rtrs_srv_resp_rdma(id, id->status);
+ spin_lock(&con->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&id->wait_list, &con->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&con->rsp_wr_wait_lock);
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 imm_type, imm_payload;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(s,
+ "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
+ ib_wc_status_msg(wc->status), wc->wr_cqe,
+ wc->opcode, wc->vendor_err, wc->byte_len);
+ close_sess(sess);
+ }
+ return;
+ }
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
+ return;
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
+ close_sess(sess);
+ break;
+ }
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_REQ_IMM)) {
+ u32 msg_id, off;
+ void *data;
+
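+			/*
+			 * The immediate payload carries the buffer id in the
+			 * upper bits and the chunk offset in the low mem_bits,
+			 * see map_cont_bufs().
+			 */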
+ msg_id = imm_payload >> sess->mem_bits;
+ off = imm_payload & ((1 << sess->mem_bits) - 1);
+ if (unlikely(msg_id >= srv->queue_depth ||
+ off >= max_chunk_size)) {
+ rtrs_err(s, "Wrong msg_id %u, off %u\n",
+ msg_id, off);
+ close_sess(sess);
+ return;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+
+ mr->msg_off = off;
+ mr->msg_id = msg_id;
+ err = rtrs_srv_inv_rkey(con, mr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n",
+ err);
+ close_sess(sess);
+ break;
+ }
+ } else {
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+ }
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ } else {
+ rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ case IB_WC_SEND:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ atomic_add(srv->queue_depth, &con->sq_wr_avail);
+
+ if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
+ rtrs_rdma_process_wr_wait_list(con);
+
+ break;
+ default:
+ rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+/**
+ * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
+ * @srv: Session
+ * @sessname: Sessname buffer
+ * @len: Length of sessname buffer
+ */
+int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOTCONN;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (sess->state != RTRS_SRV_CONNECTED)
+ continue;
+ strlcpy(sessname, sess->s.sessname,
+ min_t(size_t, sizeof(sess->s.sessname), len));
+ err = 0;
+ break;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_srv_get_sess_name);
+
+/**
+ * rtrs_srv_get_queue_depth() - Get rtrs_srv queue depth.
+ * @srv:	Session
+ */
+int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+{
+ return srv->queue_depth;
+}
+EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
+
+static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+{
+ struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ int v;
+
+ v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
+ v = cpumask_first(&cq_affinity_mask);
+ return v;
+}
+
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+{
+ sess->cur_cq_vector = find_next_bit_ring(sess);
+
+ return sess->cur_cq_vector;
+}
+
+static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+ int i;
+
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+
+ refcount_set(&srv->refcount, 1);
+ INIT_LIST_HEAD(&srv->paths_list);
+ mutex_init(&srv->paths_mutex);
+ mutex_init(&srv->paths_ev_mutex);
+ uuid_copy(&srv->paths_uuid, paths_uuid);
+ srv->queue_depth = sess_queue_depth;
+ srv->ctx = ctx;
+
+ srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ GFP_KERNEL);
+ if (!srv->chunks)
+ goto err_free_srv;
+
+ for (i = 0; i < srv->queue_depth; i++) {
+ srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ if (!srv->chunks[i])
+ goto err_free_chunks;
+ }
+ list_add(&srv->ctx_list, &ctx->srv_list);
+
+ return srv;
+
+err_free_chunks:
+ while (i--)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+
+err_free_srv:
+ kfree(srv);
+
+ return NULL;
+}
+
+static void free_srv(struct rtrs_srv *srv)
+{
+ int i;
+
+ WARN_ON(refcount_read(&srv->refcount));
+ for (i = 0; i < srv->queue_depth; i++)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+ mutex_destroy(&srv->paths_mutex);
+ mutex_destroy(&srv->paths_ev_mutex);
+ /* last put to release the srv structure */
+ put_device(&srv->dev);
+}
+
+static inline struct rtrs_srv *__find_srv_and_get(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
+ refcount_inc_not_zero(&srv->refcount))
+ return srv;
+ }
+
+ return NULL;
+}
+
+static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ srv = __find_srv_and_get(ctx, paths_uuid);
+ if (!srv)
+ srv = __alloc_srv(ctx, paths_uuid);
+ mutex_unlock(&ctx->srv_mutex);
+
+ return srv;
+}
+
+static void put_srv(struct rtrs_srv *srv)
+{
+ if (refcount_dec_and_test(&srv->refcount)) {
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ WARN_ON(srv->dev.kobj.state_in_sysfs);
+
+ mutex_lock(&ctx->srv_mutex);
+ list_del(&srv->ctx_list);
+ mutex_unlock(&ctx->srv_mutex);
+ free_srv(srv);
+ }
+}
+
+static void __add_path_to_srv(struct rtrs_srv *srv,
+ struct rtrs_srv_sess *sess)
+{
+ list_add_tail(&sess->s.entry, &srv->paths_list);
+ srv->paths_num++;
+ WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
+}
+
+static void del_path_from_srv(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ if (WARN_ON(!srv))
+ return;
+
+ mutex_lock(&srv->paths_mutex);
+ list_del(&sess->s.entry);
+ WARN_ON(!srv->paths_num);
+ srv->paths_num--;
+ mutex_unlock(&srv->paths_mutex);
+}
+
+/* Like memcmp(): 0 if the addresses match; -ENOENT on an unknown family */
+static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
+{
+ switch (a->sa_family) {
+ case AF_IB:
+ return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
+ &((struct sockaddr_ib *)b)->sib_addr,
+ sizeof(struct ib_addr)) &&
+ (b->sa_family == AF_IB);
+ case AF_INET:
+ return memcmp(&((struct sockaddr_in *)a)->sin_addr,
+ &((struct sockaddr_in *)b)->sin_addr,
+ sizeof(struct in_addr)) &&
+ (b->sa_family == AF_INET);
+ case AF_INET6:
+ return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
+ &((struct sockaddr_in6 *)b)->sin6_addr,
+ sizeof(struct in6_addr)) &&
+ (b->sa_family == AF_INET6);
+ default:
+ return -ENOENT;
+ }
+}
+
+static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+ struct rdma_addr *addr)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ (struct sockaddr *)&addr->dst_addr) &&
+ !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&addr->src_addr))
+ return true;
+
+ return false;
+}
+
+static void free_sess(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs)
+ kobject_put(&sess->kobj);
+ else
+ kfree(sess);
+}
+
+static void rtrs_srv_close_work(struct work_struct *work)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ int i;
+
+ sess = container_of(work, typeof(*sess), close_work);
+
+ rtrs_srv_destroy_sess_files(sess);
+ rtrs_srv_stop_hb(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rdma_disconnect(con->c.cm_id);
+ ib_drain_qp(con->c.qp);
+ }
+ /* Wait for all inflights */
+ rtrs_srv_wait_ops_ids(sess);
+
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_sess_down(sess);
+
+ unmap_cont_bufs(sess);
+ rtrs_srv_free_ops_ids(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rtrs_cq_qp_destroy(&con->c);
+ rdma_destroy_id(con->c.cm_id);
+ kfree(con);
+ }
+ rtrs_ib_dev_put(sess->s.dev);
+
+ del_path_from_srv(sess);
+ put_srv(sess->srv);
+ sess->srv = NULL;
+ rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+
+ kfree(sess->dma_addr);
+ kfree(sess->s.con);
+ free_sess(sess);
+}
+
+static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_msg_conn_rsp msg;
+ struct rdma_conn_param param;
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .queue_depth = cpu_to_le16(srv->queue_depth),
+ .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
+ .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
+ };
+
+ if (always_invalidate)
+ msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
+
+ err = rdma_accept(cm_id, &param);
+ if (err)
+ pr_err("rdma_accept(), err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
+{
+ struct rtrs_msg_conn_rsp msg;
+ int err;
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .errno = cpu_to_le16(errno),
+ };
+
+ err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
+ if (err)
+ pr_err("rdma_reject(), err: %d\n", err);
+
+ /* Bounce errno back */
+ return errno;
+}
+
+static struct rtrs_srv_sess *
+__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (uuid_equal(&sess->s.uuid, sess_uuid))
+ return sess;
+ }
+
+ return NULL;
+}
+
+static int create_con(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id,
+ unsigned int cid)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_con *con;
+
+ u16 cq_size, wr_queue_size;
+ int err, cq_vector;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&con->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&con->rsp_wr_wait_list);
+ con->c.cm_id = cm_id;
+ con->c.sess = &sess->s;
+ con->c.cid = cid;
+ atomic_set(&con->wr_cnt, 0);
+
+ if (con->c.cid == 0) {
+ /*
+ * All receive and all send (each requiring invalidate)
+ * + 2 for drain and heartbeat
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ cq_size = wr_queue_size;
+ } else {
+		/*
+		 * Worst case: all receive requests posted, all write requests
+		 * posted, one invalidate request per read request, plus one
+		 * extra for draining when the QP goes into the error state.
+		 */
+ cq_size = srv->queue_depth * 3 + 1;
+		/*
+		 * In theory we might have queue_depth * 32 outstanding
+		 * requests if an unsafe global key is used and we have
+		 * queue_depth read requests each consisting of 32 different
+		 * addresses. Divide by 3 for mlx5.
+		 */
+ wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
+ }
+ atomic_set(&con->sq_wr_avail, wr_queue_size);
+ cq_vector = rtrs_srv_get_next_cq_vector(sess);
+
+ /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+ wr_queue_size, IB_POLL_WORKQUEUE);
+ if (err) {
+ rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ goto free_con;
+ }
+ if (con->c.cid == 0) {
+ err = post_recv_info_req(con);
+ if (err)
+ goto free_cqqp;
+ }
+ WARN_ON(sess->s.con[cid]);
+ sess->s.con[cid] = &con->c;
+
+ /*
+ * Change context from server to current connection. The other
+ * way is to use cm_id->qp->qp_context, which does not work on OFED.
+ */
+ cm_id->context = &con->c;
+
+ return 0;
+
+free_cqqp:
+ rtrs_cq_qp_destroy(&con->c);
+free_con:
+ kfree(con);
+
+err:
+ return err;
+}
+
+static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+ struct rdma_cm_id *cm_id,
+ unsigned int con_num,
+ unsigned int recon_cnt,
+ const uuid_t *uuid)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOMEM;
+
+ if (srv->paths_num >= MAX_PATHS_NUM) {
+ err = -ECONNRESET;
+ goto err;
+ }
+ if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
+ err = -EEXIST;
+ pr_err("Path with same addr exists\n");
+ goto err;
+ }
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_sess;
+
+ sess->stats->sess = sess;
+
+ sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
+ GFP_KERNEL);
+ if (!sess->dma_addr)
+ goto err_free_stats;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_dma_addr;
+
+ sess->state = RTRS_SRV_CONNECTING;
+ sess->srv = srv;
+ sess->cur_cq_vector = -1;
+ sess->s.dst_addr = cm_id->route.addr.dst_addr;
+ sess->s.src_addr = cm_id->route.addr.src_addr;
+ sess->s.con_num = con_num;
+ sess->s.recon_cnt = recon_cnt;
+ uuid_copy(&sess->s.uuid, uuid);
+ spin_lock_init(&sess->state_lock);
+ INIT_WORK(&sess->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(sess);
+
+ sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!sess->s.dev) {
+ err = -ENOMEM;
+ goto err_free_con;
+ }
+ err = map_cont_bufs(sess);
+ if (err)
+ goto err_put_dev;
+
+ err = rtrs_srv_alloc_ops_ids(sess);
+ if (err)
+ goto err_unmap_bufs;
+
+ __add_path_to_srv(srv, sess);
+
+ return sess;
+
+err_unmap_bufs:
+ unmap_cont_bufs(sess);
+err_put_dev:
+ rtrs_ib_dev_put(sess->s.dev);
+err_free_con:
+ kfree(sess->s.con);
+err_free_dma_addr:
+ kfree(sess->dma_addr);
+err_free_stats:
+ kfree(sess->stats);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ const struct rtrs_msg_conn_req *msg,
+ size_t len)
+{
+ struct rtrs_srv_ctx *ctx = cm_id->context;
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv *srv;
+
+ u16 version, con_num, cid;
+ u16 recon_cnt;
+ int err;
+
+ if (len < sizeof(*msg)) {
+ pr_err("Invalid RTRS connection request\n");
+ goto reject_w_econnreset;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ pr_err("Invalid RTRS magic\n");
+ goto reject_w_econnreset;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ pr_err("Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ goto reject_w_econnreset;
+ }
+ con_num = le16_to_cpu(msg->cid_num);
+ if (con_num > 4096) {
+ /* Sanity check */
+ pr_err("Too many connections requested: %d\n", con_num);
+ goto reject_w_econnreset;
+ }
+ cid = le16_to_cpu(msg->cid);
+ if (cid >= con_num) {
+ /* Sanity check */
+ pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
+ goto reject_w_econnreset;
+ }
+ recon_cnt = le16_to_cpu(msg->recon_cnt);
+ srv = get_or_create_srv(ctx, &msg->paths_uuid);
+ if (!srv) {
+ err = -ENOMEM;
+ goto reject_w_err;
+ }
+ mutex_lock(&srv->paths_mutex);
+ sess = __find_sess(srv, &msg->sess_uuid);
+ if (sess) {
+ struct rtrs_sess *s = &sess->s;
+
+ /* Session already holds a reference */
+ put_srv(srv);
+
+ if (sess->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Session in wrong state: %s\n",
+ rtrs_srv_state_str(sess->state));
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ /*
+ * Sanity checks
+ */
+ if (con_num != s->con_num || cid >= s->con_num) {
+ rtrs_err(s, "Incorrect request: %d, %d\n",
+ cid, con_num);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ if (s->con[cid]) {
+ rtrs_err(s, "Connection already exists: %d\n",
+ cid);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ } else {
+ sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ &msg->sess_uuid);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&srv->paths_mutex);
+ put_srv(srv);
+ err = PTR_ERR(sess);
+ goto reject_w_err;
+ }
+ }
+ err = create_con(sess, cm_id, cid);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since session has other connections we follow normal way
+ * through workqueue, but still return an error to tell cma.c
+ * to call rdma_destroy_id() for current connection.
+ */
+ goto close_and_return_err;
+ }
+ err = rtrs_rdma_do_accept(sess, cm_id);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since current connection was successfully added to the
+ * session we follow normal way through workqueue to close the
+ * session, thus return 0 to tell cma.c we call
+ * rdma_destroy_id() ourselves.
+ */
+ err = 0;
+ goto close_and_return_err;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return 0;
+
+reject_w_err:
+ return rtrs_rdma_do_reject(cm_id, err);
+
+reject_w_econnreset:
+ return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
+
+close_and_return_err:
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_srv_sess *sess = NULL;
+ struct rtrs_sess *s = NULL;
+
+ if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
+ struct rtrs_con *c = cm_id->context;
+
+ s = c->sess;
+ sess = to_srv_sess(s);
+ }
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ /*
+ * In case of error cma.c will destroy cm_id,
+ * see cma_process_remove()
+ */
+ return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
+ ev->param.conn.private_data_len);
+ case RDMA_CM_EVENT_ESTABLISHED:
+ /* Nothing here */
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ close_sess(sess);
+ break;
+ default:
+ pr_err("Ignoring unexpected CM event %s, err %d\n",
+ rdma_event_msg(ev->event), ev->status);
+ break;
+ }
+
+ return 0;
+}
+
+static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
+ struct sockaddr *addr,
+ enum rdma_ucm_port_space ps)
+{
+ struct rdma_cm_id *cm_id;
+ int ret;
+
+ cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
+ ctx, ps, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ pr_err("Creating id for RDMA connection failed, err: %d\n",
+ ret);
+ goto err_out;
+ }
+ ret = rdma_bind_addr(cm_id, addr);
+ if (ret) {
+ pr_err("Binding RDMA address failed, err: %d\n", ret);
+ goto err_cm;
+ }
+ ret = rdma_listen(cm_id, 64);
+ if (ret) {
+ pr_err("Listening on RDMA connection failed, err: %d\n",
+ ret);
+ goto err_cm;
+ }
+
+ return cm_id;
+
+err_cm:
+ rdma_destroy_id(cm_id);
+err_out:
+
+ return ERR_PTR(ret);
+}
+
+static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
+{
+ struct sockaddr_in6 sin = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ .sin6_port = htons(port),
+ };
+ struct sockaddr_ib sib = {
+ .sib_family = AF_IB,
+ .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
+ .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
+ .sib_pkey = cpu_to_be16(0xffff),
+ };
+ struct rdma_cm_id *cm_ip, *cm_ib;
+ int ret;
+
+ /*
+ * We accept both IPoIB and IB connections, so we need to keep
+ * two cm id's, one for each socket type and port space.
+ * If the cm initialization of one of the id's fails, we abort
+ * everything.
+ */
+ cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
+ if (IS_ERR(cm_ip))
+ return PTR_ERR(cm_ip);
+
+ cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
+ if (IS_ERR(cm_ib)) {
+ ret = PTR_ERR(cm_ib);
+ goto free_cm_ip;
+ }
+
+ ctx->cm_id_ip = cm_ip;
+ ctx->cm_id_ib = cm_ib;
+
+ return 0;
+
+free_cm_ip:
+ rdma_destroy_id(cm_ip);
+
+ return ret;
+}
+
+static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->ops = *ops;
+ mutex_init(&ctx->srv_mutex);
+ INIT_LIST_HEAD(&ctx->srv_list);
+
+ return ctx;
+}
+
+static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+{
+ WARN_ON(!list_empty(&ctx->srv_list));
+ mutex_destroy(&ctx->srv_mutex);
+ kfree(ctx);
+}
+
+/**
+ * rtrs_srv_open() - open RTRS server context
+ * @ops: callback functions
+ * @port: port to listen on
+ *
+ * Creates server context with specified callbacks.
+ *
+ * Return: a valid pointer on success, otherwise an ERR_PTR.
+ */
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+{
+ struct rtrs_srv_ctx *ctx;
+ int err;
+
+ ctx = alloc_srv_ctx(ops);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = rtrs_srv_rdma_init(ctx, port);
+ if (err) {
+ free_srv_ctx(ctx);
+ return ERR_PTR(err);
+ }
+
+ return ctx;
+}
+EXPORT_SYMBOL(rtrs_srv_open);
+
+static void close_sessions(struct rtrs_srv *srv)
+{
+ struct rtrs_srv_sess *sess;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+}
+
+static void close_ctx(struct rtrs_srv_ctx *ctx)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list)
+ close_sessions(srv);
+ mutex_unlock(&ctx->srv_mutex);
+ flush_workqueue(rtrs_wq);
+}
+
+/**
+ * rtrs_srv_close() - close RTRS server context
+ * @ctx: pointer to server context
+ *
+ * Closes RTRS server context with all client sessions.
+ */
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+{
+ rdma_destroy_id(ctx->cm_id_ip);
+ rdma_destroy_id(ctx->cm_id_ib);
+ close_ctx(ctx);
+ free_srv_ctx(ctx);
+}
+EXPORT_SYMBOL(rtrs_srv_close);
+
+static int check_module_params(void)
+{
+ if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
+ sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+ if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
+ pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
+ max_chunk_size, 4096);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and the
+ * offset inside the memory chunk
+ */
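+	/*
+	 * With the default parameters this is ilog2(512 - 1) + 1 = 9 bits for
+	 * the buffer id plus ilog2(131072 - 1) + 1 = 17 bits for the offset,
+	 * i.e. 26 bits in total.
+	 */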
+ if ((ilog2(sess_queue_depth - 1) + 1) +
+ (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
+ pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
+ MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init rtrs_server_init(void)
+{
+ int err;
+
+ pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
+ KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
+ max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
+ sess_queue_depth, always_invalidate);
+
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ err = check_module_params();
+ if (err) {
+ pr_err("Failed to load module, invalid module parameters, err: %d\n",
+ err);
+ return err;
+ }
+ chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
+ get_order(max_chunk_size));
+ if (!chunk_pool)
+ return -ENOMEM;
+ rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
+ if (IS_ERR(rtrs_dev_class)) {
+ err = PTR_ERR(rtrs_dev_class);
+ goto out_chunk_pool;
+ }
+ rtrs_wq = alloc_workqueue("rtrs_server_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ err = -ENOMEM;
+ goto out_dev_class;
+ }
+
+ return 0;
+
+out_dev_class:
+ class_destroy(rtrs_dev_class);
+out_chunk_pool:
+ mempool_destroy(chunk_pool);
+
+ return err;
+}
+
+static void __exit rtrs_server_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_dev_class);
+ mempool_destroy(chunk_pool);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_server_init);
+module_exit(rtrs_server_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
new file mode 100644
index 000000000000..dc95b0932f0d
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_SRV_H
+#define RTRS_SRV_H
+
+#include <linux/device.h>
+#include <linux/refcount.h>
+#include "rtrs-pri.h"
+
+/*
+ * enum rtrs_srv_state - Server states.
+ */
+enum rtrs_srv_state {
+ RTRS_SRV_CONNECTING,
+ RTRS_SRV_CONNECTED,
+ RTRS_SRV_CLOSING,
+ RTRS_SRV_CLOSED,
+};
+
+/* Statistics for read and write operations.
+ * See Documentation/ABI/testing/sysfs-class-rtrs-server for details.
+ */
+struct rtrs_srv_stats_rdma_stats {
+ struct {
+ atomic64_t cnt;
+ atomic64_t size_total;
+ } dir[2];
+};
+
+struct rtrs_srv_stats {
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats rdma_stats;
+ struct rtrs_srv_sess *sess;
+};
+
+struct rtrs_srv_con {
+ struct rtrs_con c;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+};
+
+/* IO context in rtrs_srv, each io has one */
+struct rtrs_srv_op {
+ struct rtrs_srv_con *con;
+ u32 msg_id;
+ u8 dir;
+ struct rtrs_msg_rdma_read *rd_msg;
+ struct ib_rdma_wr tx_wr;
+ struct ib_sge tx_sg;
+ struct list_head wait_list;
+ int status;
+};
+
+/*
+ * Server side memory region context. When always_invalidate=Y, queue_depth
+ * memory regions are needed so that each memory region can be invalidated.
+ */
+struct rtrs_srv_mr {
+ struct ib_mr *mr;
+ struct sg_table sgt;
+ struct ib_cqe inv_cqe; /* only for always_invalidate=true */
+ u32 msg_id; /* only for always_invalidate=true */
+ u32 msg_off; /* only for always_invalidate=true */
+ struct rtrs_iu *iu; /* send buffer for new rkey msg */
+};
+
+struct rtrs_srv_sess {
+ struct rtrs_sess s;
+ struct rtrs_srv *srv;
+ struct work_struct close_work;
+ enum rtrs_srv_state state;
+ spinlock_t state_lock;
+ int cur_cq_vector;
+ struct rtrs_srv_op **ops_ids;
+ atomic_t ids_inflight;
+ wait_queue_head_t ids_waitq;
+ struct rtrs_srv_mr *mrs;
+ unsigned int mrs_num;
+ dma_addr_t *dma_addr;
+ bool established;
+ unsigned int mem_bits;
+ struct kobject kobj;
+ struct rtrs_srv_stats *stats;
+};
+
+struct rtrs_srv {
+ struct list_head paths_list;
+ int paths_up;
+ struct mutex paths_ev_mutex;
+ size_t paths_num;
+ struct mutex paths_mutex;
+ uuid_t paths_uuid;
+ refcount_t refcount;
+ struct rtrs_srv_ctx *ctx;
+ struct list_head ctx_list;
+ void *priv;
+ size_t queue_depth;
+ struct page **chunks;
+ struct device dev;
+ unsigned int dev_ref;
+ struct kobject *kobj_paths;
+};
+
+struct rtrs_srv_ctx {
+ struct rtrs_srv_ops ops;
+ struct rdma_cm_id *cm_id_ip;
+ struct rdma_cm_id *cm_id_ib;
+ struct mutex srv_mutex;
+ struct list_head srv_list;
+};
+
+extern struct class *rtrs_dev_class;
+
+void close_sess(struct rtrs_srv_sess *sess);
+
+static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
+ size_t size, int d)
+{
+ atomic64_inc(&s->rdma_stats.dir[d].cnt);
+ atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+}
+
+/* functions which are implemented in rtrs-srv-stats.c */
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats,
+ bool enable);
+int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf,
+ size_t len);
+int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+
+/* functions which are implemented in rtrs-srv-sysfs.c */
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+
+#endif /* RTRS_SRV_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
new file mode 100644
index 000000000000..ff1093d6e4bc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/inet.h>
+
+#include "rtrs-pri.h"
+#include "rtrs-log.h"
+
+MODULE_DESCRIPTION("RDMA Transport Core");
+MODULE_LICENSE("GPL");
+
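+/*
+ * Allocate an array of @queue_size IUs, each backed by a @size byte buffer
+ * that is DMA-mapped for @dir; on error the IUs mapped so far are freed.
+ */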
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
+ struct ib_device *dma_dev,
+ enum dma_data_direction dir,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc))
+{
+ struct rtrs_iu *ius, *iu;
+ int i;
+
+ ius = kcalloc(queue_size, sizeof(*ius), gfp_mask);
+ if (!ius)
+ return NULL;
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto err;
+
+ iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+ if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
+ goto err;
+
+ iu->cqe.done = done;
+ iu->size = size;
+ iu->direction = dir;
+ }
+ return ius;
+err:
+ rtrs_iu_free(ius, dir, dma_dev, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
+
+void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
+ struct ib_device *ibdev, u32 queue_size)
+{
+ struct rtrs_iu *iu;
+ int i;
+
+ if (!ius)
+ return;
+
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, dir);
+ kfree(iu->buf);
+ }
+ kfree(ius);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_free);
+
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+
+ list.addr = iu->dma_addr;
+ list.length = iu->size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ if (list.length == 0) {
+ rtrs_wrn(con->sess,
+ "Posting receive work request failed, sg list is empty\n");
+ return -EINVAL;
+ }
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr;
+
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = cqe,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
+
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_send_wr wr;
+ struct ib_sge list;
+
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
+ list.addr = iu->dma_addr;
+ list.length = size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ };
+
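+ /*
+ * Append our send WR to the tail of the caller-supplied WR chain (if
+ * any) so that everything is posted with a single ib_post_send() call.
+ */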
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
+
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_rdma_wr wr;
+ int i;
+
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = &iu->cqe,
+ .wr.sg_list = sge,
+ .wr.num_sge = num_sge,
+ .rkey = rkey,
+ .remote_addr = rdma_addr,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ .wr.send_flags = flags,
+ };
+
+ /*
+ * If one of the sges has 0 size, the operation will fail with a
+ * length error
+ */
+ for (i = 0; i < num_sge; i++)
+ if (WARN_ON(sge[i].length == 0))
+ return -EINVAL;
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr.wr;
+ } else {
+ head = &wr.wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
+
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_send_wr wr;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = cqe,
+ .send_flags = flags,
+ .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .ex.imm_data = cpu_to_be32(imm_data),
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
+
+static void qp_event_handler(struct ib_event *ev, void *ctx)
+{
+ struct rtrs_con *con = ctx;
+
+ switch (ev->event) {
+ case IB_EVENT_COMM_EST:
+ rtrs_info(con->sess, "QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
+ break;
+ default:
+ rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ break;
+ }
+}
+
+static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+ enum ib_poll_context poll_ctx)
+{
+ struct rdma_cm_id *cm_id = con->cm_id;
+ struct ib_cq *cq;
+
+ cq = ib_alloc_cq(cm_id->device, con, cq_size,
+ cq_vector, poll_ctx);
+ if (IS_ERR(cq)) {
+ rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ PTR_ERR(cq));
+ return PTR_ERR(cq);
+ }
+ con->cq = cq;
+
+ return 0;
+}
+
+static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ u16 wr_queue_size, u32 max_sge)
+{
+ struct ib_qp_init_attr init_attr = {NULL};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+ init_attr.cap.max_send_wr = wr_queue_size;
+ init_attr.cap.max_recv_wr = wr_queue_size;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.event_handler = qp_event_handler;
+ init_attr.qp_context = con;
+ init_attr.cap.max_send_sge = max_sge;
+
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = con->cq;
+ init_attr.recv_cq = con->cq;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+
+ ret = rdma_create_qp(cm_id, pd, &init_attr);
+ if (ret) {
+ rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ return ret;
+ }
+ con->qp = cm_id->qp;
+
+ return ret;
+}
+
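+/*
+ * Create a completion queue and an RC QP that uses it for both send and
+ * receive completions; the CQ is freed again if QP creation fails.
+ */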
+int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx)
+{
+ int err;
+
+ err = create_cq(con, cq_vector, cq_size, poll_ctx);
+ if (err)
+ return err;
+
+ err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
+ if (err) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ return err;
+ }
+ con->sess = sess;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
+
+void rtrs_cq_qp_destroy(struct rtrs_con *con)
+{
+ if (con->qp) {
+ rdma_destroy_qp(con->cm_id);
+ con->qp = NULL;
+ }
+ if (con->cq) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
+
+static void schedule_hb(struct rtrs_sess *sess)
+{
+ queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
+ msecs_to_jiffies(sess->hb_interval_ms));
+}
+
+void rtrs_send_hb_ack(struct rtrs_sess *sess)
+{
+ struct rtrs_con *usr_con = sess->con[0];
+ u32 imm;
+ int err;
+
+ imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
+
+static void hb_work(struct work_struct *work)
+{
+ struct rtrs_con *usr_con;
+ struct rtrs_sess *sess;
+ u32 imm;
+ int err;
+
+ sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
+ usr_con = sess->con[0];
+
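+ /*
+ * hb_missed_cnt is expected to be cleared by the receive path when the
+ * peer answers; too many consecutive misses mean the peer is gone.
+ */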
+ if (sess->hb_missed_cnt > sess->hb_missed_max) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+ if (sess->hb_missed_cnt++) {
+ /* Reschedule work without sending hb */
+ schedule_hb(sess);
+ return;
+ }
+ imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+
+ schedule_hb(sess);
+}
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq)
+{
+ sess->hb_cqe = cqe;
+ sess->hb_interval_ms = interval_ms;
+ sess->hb_err_handler = err_handler;
+ sess->hb_wq = wq;
+ sess->hb_missed_max = missed_max;
+ sess->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+}
+EXPORT_SYMBOL_GPL(rtrs_init_hb);
+
+void rtrs_start_hb(struct rtrs_sess *sess)
+{
+ schedule_hb(sess);
+}
+EXPORT_SYMBOL_GPL(rtrs_start_hb);
+
+void rtrs_stop_hb(struct rtrs_sess *sess)
+{
+ cancel_delayed_work_sync(&sess->hb_dwork);
+ sess->hb_missed_cnt = 0;
+ sess->hb_missed_max = 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_stop_hb);
+
+static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
+ short port, struct sockaddr_storage *dst)
+{
+ struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
+ int ret;
+
+ /*
+ * We can use some of the IPv6 functions since a GID is a valid
+ * IPv6 address format
+ */
+ ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
+ if (ret == 0)
+ return -EINVAL;
+
+ dst_ib->sib_family = AF_IB;
+ /*
+ * Use the same TCP server port number as the IB service ID
+ * on the IB port space range
+ */
+ dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
+ dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ dst_ib->sib_pkey = cpu_to_be16(0xffff);
+
+ return 0;
+}
+
+/**
+ * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
+ * @addr: String representation of an addr (IPv4, IPv6 or IB GID):
+ * - "ip:192.168.1.1"
+ * - "ip:fe80::200:5aee:feaa:20a2"
+ * - "gid:fe80::200:5aee:feaa:20a2"
+ * @len: String address length
+ * @port: Destination port
+ * @dst: Destination sockaddr structure
+ *
+ * Returns 0 if conversion successful. Non-zero on error.
+ */
+static int rtrs_str_to_sockaddr(const char *addr, size_t len,
+ u16 port, struct sockaddr_storage *dst)
+{
+ if (strncmp(addr, "gid:", 4) == 0) {
+ return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
+ } else if (strncmp(addr, "ip:", 3) == 0) {
+ char port_str[8];
+ char *cpy;
+ int err;
+
+ snprintf(port_str, sizeof(port_str), "%u", port);
+ cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
+ err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
+ cpy, port_str, dst) : -ENOMEM;
+ kfree(cpy);
+
+ return err;
+ }
+ return -EPROTONOSUPPORT;
+}
+
+/**
+ * sockaddr_to_str() - convert sockaddr to a string.
+ * @addr: the sockaddr structure to be converted.
+ * @buf: output buffer for the string representation.
+ * @len: length of the output buffer.
+ *
+ * The return value is the number of characters written into buf not
+ * including the trailing '\0'. If len is 0 the function returns 0.
+ */
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
+{
+
+ switch (addr->sa_family) {
+ case AF_IB:
+ return scnprintf(buf, len, "gid:%pI6",
+ &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
+ case AF_INET:
+ return scnprintf(buf, len, "ip:%pI4",
+ &((struct sockaddr_in *)addr)->sin_addr);
+ case AF_INET6:
+ return scnprintf(buf, len, "ip:%pI6c",
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
+ }
+ return scnprintf(buf, len, "<invalid address family>");
+}
+EXPORT_SYMBOL(sockaddr_to_str);
+
+/**
+ * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
+ * to socket addresses
+ * @str: string containing source and destination addr of a path
+ * separated by ',' or '@', e.g. "ip:1.1.1.1,ip:1.1.1.2" or
+ * "ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it is
+ * considered to be the destination.
+ * @len: string length
+ * @port: Destination port number.
+ * @addr: source and destination sockaddrs to fill in; addr->src is
+ * set to NULL if str doesn't contain a source address.
+ *
+ * Returns zero if conversion successful. Non-zero otherwise.
+ */
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr)
+{
+ const char *d;
+
+ d = strchr(str, ',');
+ if (!d)
+ d = strchr(str, '@');
+ if (d) {
+ if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
+ return -EINVAL;
+ d += 1;
+ len -= d - str;
+ str = d;
+
+ } else {
+ addr->src = NULL;
+ }
+ return rtrs_str_to_sockaddr(str, len, port, addr->dst);
+}
+EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool)
+{
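+ /* The alloc and free hooks must be provided together or not at all */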
+ WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free));
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->mutex);
+ pool->pd_flags = pd_flags;
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
+
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
+{
+ mutex_destroy(&pool->mutex);
+ WARN_ON(!list_empty(&pool->list));
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
+
+static void dev_free(struct kref *ref)
+{
+ struct rtrs_rdma_dev_pd *pool;
+ struct rtrs_ib_dev *dev;
+
+ dev = container_of(ref, typeof(*dev), ref);
+ pool = dev->pool;
+
+ mutex_lock(&pool->mutex);
+ list_del(&dev->entry);
+ mutex_unlock(&pool->mutex);
+
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
+ ib_dealloc_pd(dev->ib_pd);
+
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+}
+
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
+{
+ return kref_put(&dev->ref, dev_free);
+}
+EXPORT_SYMBOL(rtrs_ib_dev_put);
+
+static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
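+/*
+ * Look the device up in the pool by node GUID and take a reference, or
+ * allocate a new entry together with its protection domain otherwise.
+ */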
+struct rtrs_ib_dev *
+rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ struct rtrs_ib_dev *dev;
+
+ mutex_lock(&pool->mutex);
+ list_for_each_entry(dev, &pool->list, entry) {
+ if (dev->ib_dev->node_guid == ib_dev->node_guid &&
+ rtrs_ib_dev_get(dev))
+ goto out_unlock;
+ }
+ mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->alloc)
+ dev = pool->ops->alloc();
+ else
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(dev))
+ goto out_err;
+
+ kref_init(&dev->ref);
+ dev->pool = pool;
+ dev->ib_dev = ib_dev;
+ dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
+ if (IS_ERR(dev->ib_pd))
+ goto out_free_dev;
+
+ if (pool->ops && pool->ops->init && pool->ops->init(dev))
+ goto out_free_pd;
+
+ mutex_lock(&pool->mutex);
+ list_add(&dev->entry, &pool->list);
+out_unlock:
+ mutex_unlock(&pool->mutex);
+ return dev;
+
+out_free_pd:
+ ib_dealloc_pd(dev->ib_pd);
+out_free_dev:
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
new file mode 100644
index 000000000000..9af750f4d783
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_H
+#define RTRS_H
+
+#include <linux/socket.h>
+#include <linux/scatterlist.h>
+
+struct rtrs_permit;
+struct rtrs_clt;
+struct rtrs_srv_ctx;
+struct rtrs_srv;
+struct rtrs_srv_op;
+
+/*
+ * RDMA transport (RTRS) client API
+ */
+
+/**
+ * enum rtrs_clt_link_ev - Events about connectivity state of a client
+ * @RTRS_CLT_LINK_EV_RECONNECTED: Client was reconnected.
+ * @RTRS_CLT_LINK_EV_DISCONNECTED: Client was disconnected.
+ */
+enum rtrs_clt_link_ev {
+ RTRS_CLT_LINK_EV_RECONNECTED,
+ RTRS_CLT_LINK_EV_DISCONNECTED,
+};
+
+/**
+ * struct rtrs_addr - Source and destination address of a path to be established
+ */
+struct rtrs_addr {
+ struct sockaddr_storage *src;
+ struct sockaddr_storage *dst;
+};
+
+/**
+ * struct rtrs_clt_ops - link event callback and private pointer
+ * @priv: User supplied private data.
+ * @link_ev: Event notification callback for connection state changes;
+ * called with @priv and the event that occurred.
+ */
+struct rtrs_clt_ops {
+ void *priv;
+ void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
+};
+
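+/*
+ * Typical client usage (sketch, see the RNBD client for a real user): open a
+ * session with rtrs_clt_open(), grab a permit with rtrs_clt_get_permit() and
+ * issue transfers with rtrs_clt_request(); close it with rtrs_clt_close().
+ */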
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts);
+
+void rtrs_clt_close(struct rtrs_clt *sess);
+
+/**
+ * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
+ * @permit: RTRS permit pointer; it identifies the memory allocation reserved
+ * for a future RDMA operation.
+ */
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
+
+enum {
+ RTRS_PERMIT_NOWAIT = 0,
+ RTRS_PERMIT_WAIT = 1,
+};
+
+/**
+ * enum rtrs_clt_con_type - type of IB connection to use with a given
+ * rtrs_permit
+ * @RTRS_ADMIN_CON: use the connection reserved for "service" messages
+ * @RTRS_IO_CON: use a connection reserved for IO
+ */
+enum rtrs_clt_con_type {
+ RTRS_ADMIN_CON,
+ RTRS_IO_CON
+};
+
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait);
+
+void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
+
+/**
+ * struct rtrs_clt_req_ops - request confirmation callback and
+ * a private pointer
+ * @priv: User supplied private data.
+ * @conf_fn: Confirmation callback; called with @priv and the errno of the
+ * completed request.
+ */
+struct rtrs_clt_req_ops {
+ void *priv;
+ void (*conf_fn)(void *priv, int errno);
+};
+
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *sess, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t len,
+ struct scatterlist *sg, unsigned int sg_cnt);
+
+/**
+ * struct rtrs_attrs - RTRS session attributes
+ */
+struct rtrs_attrs {
+ u32 queue_depth;
+ u32 max_io_size;
+ u8 sessname[NAME_MAX];
+ struct kobject *sess_kobj;
+};
+
+int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+
+/*
+ * Here goes RTRS server API
+ */
+
+/**
+ * enum rtrs_srv_link_ev - Server link events
+ * @RTRS_SRV_LINK_EV_CONNECTED: Connection from client established
+ * @RTRS_SRV_LINK_EV_DISCONNECTED: Connection was disconnected, all
+ * RTRS resources of the connection were freed.
+ */
+enum rtrs_srv_link_ev {
+ RTRS_SRV_LINK_EV_CONNECTED,
+ RTRS_SRV_LINK_EV_DISCONNECTED,
+};
+
+struct rtrs_srv_ops {
+ /**
+ * rdma_ev(): Event notification for RDMA operations
+ * If the callback returns a value != 0, an error
+ * message for the data transfer will be sent to
+ * the client.
+ *
+ * @sess: Session
+ * @priv: Private data set by rtrs_srv_set_sess_priv()
+ * @id: internal RTRS operation id
+ * @dir: READ/WRITE
+ * @data: Pointer to (bidirectional) rdma memory area:
+ * - in case of %RTRS_SRV_RDMA_EV_RECV contains
+ * data sent by the client
+ * - in case of %RTRS_SRV_RDMA_EV_WRITE_REQ points
+ * to the memory area where the response is to be
+ * written to
+ * @datalen: Size of the memory area in @data
+ * @usr: The extra user message sent by the client (%vec)
+ * @usrlen: Size of the user message
+ */
+ int (*rdma_ev)(struct rtrs_srv *sess, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen);
+ /**
+ * link_ev(): Events about connectivity state changes
+ * If the callback returns != 0 and the event
+ * %RTRS_SRV_LINK_EV_CONNECTED the corresponding
+ * session will be destroyed.
+ * @sess: Session
+ * @ev: event
+ * @priv: Private data from user if previously set with
+ * rtrs_srv_set_sess_priv()
+ */
+ int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ void *priv);
+};
+
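+/*
+ * Typical server usage (sketch, see the RNBD server for a real user): fill in
+ * struct rtrs_srv_ops, call rtrs_srv_open() on module load, answer rdma_ev()
+ * requests with rtrs_srv_resp_rdma(), and call rtrs_srv_close() on unload.
+ */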
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port);
+
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
+
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
+
+void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+
+int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+
+int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr);
+
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+#endif
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index cd1181c39ed2..d8fcd21ab472 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -71,7 +71,6 @@ static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
-static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;
@@ -95,10 +94,6 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-"Whether to use fast registration if both FMR and fast registration are supported");
-
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
@@ -146,7 +141,7 @@ module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
"Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
-static void srp_add_one(struct ib_device *device);
+static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -388,24 +383,6 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
srp_new_ib_cm_id(ch);
}
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_fmr_pool_param fmr_param;
-
- memset(&fmr_param, 0, sizeof(fmr_param));
- fmr_param.pool_size = target->mr_pool_size;
- fmr_param.dirty_watermark = fmr_param.pool_size / 4;
- fmr_param.cache = 1;
- fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
- fmr_param.page_shift = ilog2(dev->mr_page_size);
- fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- return ib_create_fmr_pool(dev->pd, &fmr_param);
-}
-
/**
* srp_destroy_fr_pool() - free the resources owned by a pool
* @pool: Fast registration pool to be destroyed.
@@ -556,7 +533,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
- struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
@@ -619,14 +595,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
"FR pool allocation failed (%d)\n", ret);
goto err_qp;
}
- } else if (dev->use_fmr) {
- fmr_pool = srp_alloc_fmr_pool(target);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- shost_printk(KERN_WARNING, target->scsi_host, PFX
- "FMR pool allocation failed (%d)\n", ret);
- goto err_qp;
- }
}
if (ch->qp)
@@ -644,10 +612,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
ch->fr_pool = fr_pool;
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
- ch->fmr_pool = fmr_pool;
}
kfree(init_attr);
@@ -702,9 +666,6 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
}
srp_destroy_qp(ch);
@@ -1017,12 +978,8 @@ static void srp_free_req_data(struct srp_target_port *target,
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- if (dev->use_fast_reg) {
+ if (dev->use_fast_reg)
kfree(req->fr_list);
- } else {
- kfree(req->fmr_list);
- kfree(req->map_page);
- }
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
target->indirect_size,
@@ -1056,16 +1013,8 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
GFP_KERNEL);
if (!mr_list)
goto out;
- if (srp_dev->use_fast_reg) {
+ if (srp_dev->use_fast_reg)
req->fr_list = mr_list;
- } else {
- req->fmr_list = mr_list;
- req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
- sizeof(void *),
- GFP_KERNEL);
- if (!req->map_page)
- goto out;
- }
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->indirect_desc)
goto out;
@@ -1272,11 +1221,6 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
if (req->nmdesc)
srp_fr_pool_put(ch->fr_pool, req->fr_list,
req->nmdesc);
- } else if (dev->use_fmr) {
- struct ib_pool_fmr **pfmr;
-
- for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
- ib_fmr_pool_unmap(*pfmr);
}
ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1472,50 +1416,6 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
state->ndesc++;
}
-static int srp_map_finish_fmr(struct srp_map_state *state,
- struct srp_rdma_ch *ch)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pool_fmr *fmr;
- u64 io_addr = 0;
-
- if (state->fmr.next >= state->fmr.end) {
- shost_printk(KERN_ERR, ch->target->scsi_host,
- PFX "Out of MRs (mr_per_cmd = %d)\n",
- ch->target->mr_per_cmd);
- return -ENOMEM;
- }
-
- WARN_ON_ONCE(!dev->use_fmr);
-
- if (state->npages == 0)
- return 0;
-
- if (state->npages == 1 && target->global_rkey) {
- srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_rkey);
- goto reset_state;
- }
-
- fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
- state->npages, io_addr);
- if (IS_ERR(fmr))
- return PTR_ERR(fmr);
-
- *state->fmr.next++ = fmr;
- state->nmdesc++;
-
- srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
- state->dma_len, fmr->fmr->rkey);
-
-reset_state:
- state->npages = 0;
- state->dma_len = 0;
-
- return 0;
-}
-
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
srp_handle_qp_err(cq, wc, "FAST REG");
@@ -1606,74 +1506,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
return n;
}
-static int srp_map_sg_entry(struct srp_map_state *state,
- struct srp_rdma_ch *ch,
- struct scatterlist *sg)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- dma_addr_t dma_addr = sg_dma_address(sg);
- unsigned int dma_len = sg_dma_len(sg);
- unsigned int len = 0;
- int ret;
-
- WARN_ON_ONCE(!dma_len);
-
- while (dma_len) {
- unsigned offset = dma_addr & ~dev->mr_page_mask;
-
- if (state->npages == dev->max_pages_per_mr ||
- (state->npages > 0 && offset != 0)) {
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
- }
-
- len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
- if (!state->npages)
- state->base_dma_addr = dma_addr;
- state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
- state->dma_len += len;
- dma_addr += len;
- dma_len -= len;
- }
-
- /*
- * If the end of the MR is not on a page boundary then we need to
- * close it out and start a new one -- we can only merge at page
- * boundaries.
- */
- ret = 0;
- if ((dma_addr & ~dev->mr_page_mask) != 0)
- ret = srp_map_finish_fmr(state, ch);
- return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
- struct srp_request *req, struct scatterlist *scat,
- int count)
-{
- struct scatterlist *sg;
- int i, ret;
-
- state->pages = req->map_page;
- state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
- for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg);
- if (ret)
- return ret;
- }
-
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
@@ -1733,7 +1565,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
struct srp_device *dev = target->srp_host->srp_dev;
struct srp_map_state state;
struct srp_direct_buf idb_desc;
- u64 idb_pages[1];
struct scatterlist idb_sg[1];
int ret;
@@ -1756,14 +1587,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
- } else if (dev->use_fmr) {
- state.pages = idb_pages;
- state.pages[0] = (req->indirect_dma_addr &
- dev->mr_page_mask);
- state.npages = 1;
- ret = srp_map_finish_fmr(&state, ch);
- if (ret < 0)
- return ret;
} else {
return -EINVAL;
}
@@ -1787,9 +1610,6 @@ static void srp_check_mapping(struct srp_map_state *state,
if (dev->use_fast_reg)
for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
mr_len += (*pfr)->mr->length;
- else if (dev->use_fmr)
- for (i = 0; i < state->nmdesc; i++)
- mr_len += be32_to_cpu(req->indirect_desc[i].len);
if (desc_len != scsi_bufflen(req->scmnd) ||
mr_len > scsi_bufflen(req->scmnd))
pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1904,8 +1724,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
- else if (dev->use_fmr)
- ret = srp_map_sg_fmr(&state, ch, req, scat, count);
else
ret = srp_map_sg_dma(&state, ch, req, scat, count);
req->nmdesc = state.nmdesc;
@@ -3424,6 +3242,7 @@ enum {
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
+ SRP_OPT_CH_COUNT = 1 << 19,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3457,6 +3276,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
+ { SRP_OPT_CH_COUNT, "ch_count=%u", },
{ SRP_OPT_ERR, NULL }
};
@@ -3758,6 +3578,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->max_it_iu_size = token;
break;
+ case SRP_OPT_CH_COUNT:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad channel count %s\n", p);
+ goto out;
+ }
+ target->ch_count = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3864,13 +3692,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
}
- if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+ if (!srp_dev->has_fr && !target->allow_ext_sg &&
target->cmd_sg_cnt < target->sg_tablesize) {
pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
target->sg_tablesize = target->cmd_sg_cnt;
}
- if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+ if (srp_dev->use_fast_reg) {
bool gaps_reg = (ibdev->attrs.device_cap_flags &
IB_DEVICE_SG_GAPS_REG);
@@ -3878,12 +3706,12 @@ static ssize_t srp_create_target(struct device *dev,
(ilog2(srp_dev->mr_page_size) - 9);
if (!gaps_reg) {
/*
- * FR and FMR can only map one HCA page per entry. If
- * the start address is not aligned on a HCA page
- * boundary two entries will be used for the head and
- * the tail although these two entries combined
- * contain at most one HCA page of data. Hence the "+
- * 1" in the calculation below.
+ * FR can only map one HCA page per entry. If the start
+ * address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail
+ * although these two entries combined contain at most
+ * one HCA page of data. Hence the "+ 1" in the
+ * calculation below.
*
* The indirect data buffer descriptor is contiguous
* so the memory for that buffer will only be
@@ -3921,11 +3749,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
ret = -ENOMEM;
- target->ch_count = max_t(unsigned, num_online_nodes(),
- min(ch_count ? :
- min(4 * num_online_nodes(),
- ibdev->num_comp_vectors),
- num_online_cpus()));
+ if (target->ch_count == 0)
+ target->ch_count =
+ max_t(unsigned int, num_online_nodes(),
+ min(ch_count ?:
+ min(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus()));
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
@@ -4132,7 +3962,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
}
}
-static void srp_add_one(struct ib_device *device)
+static int srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
@@ -4144,7 +3974,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
- return;
+ return -ENOMEM;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -4162,23 +3992,15 @@ static void srp_add_one(struct ib_device *device)
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
- srp_dev->has_fmr = (device->ops.alloc_fmr &&
- device->ops.dealloc_fmr &&
- device->ops.map_phys_fmr &&
- device->ops.unmap_fmr);
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
- if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
- dev_warn(&device->dev, "neither FMR nor FR is supported\n");
- } else if (!never_register &&
- attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
- srp_dev->use_fast_reg = (srp_dev->has_fr &&
- (!srp_dev->has_fmr || prefer_fr));
- srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
- }
+ if (!never_register && !srp_dev->has_fr)
+ dev_warn(&device->dev, "FR is not supported\n");
+ else if (!never_register &&
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+ srp_dev->use_fast_reg = srp_dev->has_fr;
- if (never_register || !register_always ||
- (!srp_dev->has_fmr && !srp_dev->has_fr))
+ if (never_register || !register_always || !srp_dev->has_fr)
flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
if (srp_dev->use_fast_reg) {
@@ -4197,8 +4019,12 @@ static void srp_add_one(struct ib_device *device)
srp_dev->dev = device;
srp_dev->pd = ib_alloc_pd(device, flags);
- if (IS_ERR(srp_dev->pd))
- goto free_dev;
+ if (IS_ERR(srp_dev->pd)) {
+ int ret = PTR_ERR(srp_dev->pd);
+
+ kfree(srp_dev);
+ return ret;
+ }
if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
@@ -4212,10 +4038,7 @@ static void srp_add_one(struct ib_device *device)
}
ib_set_client_data(device, &srp_client, srp_dev);
- return;
-
-free_dev:
- kfree(srp_dev);
+ return 0;
}
static void srp_remove_one(struct ib_device *device, void *client_data)
@@ -4225,8 +4048,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
struct srp_target_port *target;
srp_dev = client_data;
- if (!srp_dev)
- return;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
device_unregister(&host->dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 6fabcc2faf1f..6818cac0a3b7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -44,7 +44,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
enum {
@@ -95,8 +94,7 @@ enum srp_iu_type {
/*
* @mr_page_mask: HCA memory registration page mask.
* @mr_page_size: HCA memory registration page size.
- * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
- * request.
+ * @mr_max_size: Maximum size in bytes of a single FR registration request.
*/
struct srp_device {
struct list_head dev_list;
@@ -107,9 +105,7 @@ struct srp_device {
int mr_page_size;
int mr_max_size;
int max_pages_per_mr;
- bool has_fmr;
bool has_fr;
- bool use_fmr;
bool use_fast_reg;
};
@@ -127,11 +123,7 @@ struct srp_host {
struct srp_request {
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- union {
- struct ib_pool_fmr **fmr_list;
- struct srp_fr_desc **fr_list;
- };
- u64 *map_page;
+ struct srp_fr_desc **fr_list;
struct srp_direct_buf *indirect_desc;
dma_addr_t indirect_dma_addr;
short nmdesc;
@@ -155,10 +147,7 @@ struct srp_rdma_ch {
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
struct ib_qp *qp;
- union {
- struct ib_fmr_pool *fmr_pool;
- struct srp_fr_pool *fr_pool;
- };
+ struct srp_fr_pool *fr_pool;
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
u8 max_imm_sge;
@@ -319,20 +308,16 @@ struct srp_fr_pool {
* @pages: Array with DMA addresses of pages being considered for
* memory registration.
* @base_dma_addr: DMA address of the first page that has not yet been mapped.
- * @dma_len: Number of bytes that will be registered with the next
- * FMR or FR memory registration call.
+ * @dma_len: Number of bytes that will be registered with the next FR
+ * memory registration call.
* @total_len: Total number of bytes in the sg-list being mapped.
* @npages: Number of page addresses in the pages[] array.
- * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
+ * @nmdesc: Number of FR memory descriptors used for mapping.
* @ndesc: Number of SRP buffer descriptors that have been filled in.
*/
struct srp_map_state {
union {
struct {
- struct ib_pool_fmr **next;
- struct ib_pool_fmr **end;
- } fmr;
- struct {
struct srp_fr_desc **next;
struct srp_fr_desc **end;
} fr;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 98552749d71c..ef7fcd3e8e15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(srpt_srq_size,
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
0444);
@@ -135,14 +135,11 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
- struct srpt_device *sdev;
+ struct srpt_device *sdev =
+ container_of(handler, struct srpt_device, event_handler);
struct srpt_port *sport;
u8 port_num;
- sdev = ib_get_client_data(event->device, &srpt_client);
- if (!sdev || sdev->device != event->device)
- return;
-
pr_debug("ASYNC event= %d on device= %s\n", event->event,
dev_name(&sdev->device->dev));
@@ -217,8 +214,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
- event->event, ch, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -610,6 +608,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
dev_name(&sport->sdev->device->dev), sport->port,
PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0,
+ &port_modify);
+
}
}
@@ -633,9 +636,8 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
- if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- pr_err("disabling MAD processing failed.\n");
if (sport->mad_agent) {
+ ib_modify_port(sdev->device, i, 0, &port_modify);
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
}
@@ -1814,18 +1816,13 @@ retry:
*/
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
- qp_init->cap.max_send_sge = min(attrs->max_send_sge,
- SRPT_MAX_SG_PER_WQE);
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
+ qp_init->cap.max_send_sge = attrs->max_send_sge;
+ qp_init->cap.max_recv_sge = 1;
qp_init->port_num = ch->sport->port;
- if (sdev->use_srq) {
+ if (sdev->use_srq)
qp_init->srq = sdev->srq;
- } else {
+ else
qp_init->cap.max_recv_wr = ch->rq_size;
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
- }
if (ch->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
@@ -1984,8 +1981,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s because target %s_%d has been disabled\n",
- ch->sess_name,
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
dev_name(&sport->sdev->device->dev),
sport->port);
srpt_close_ch(ch);
@@ -2496,7 +2493,8 @@ reject:
SRP_BUF_FORMAT_INDIRECT);
if (rdma_cm_id)
- rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+ rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
else
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
@@ -3104,7 +3102,7 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
*/
-static void srpt_add_one(struct ib_device *device)
+static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
@@ -3115,14 +3113,16 @@ static void srpt_add_one(struct ib_device *device)
sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!sdev)
- goto err;
+ return -ENOMEM;
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(sdev->pd))
+ if (IS_ERR(sdev->pd)) {
+ ret = PTR_ERR(sdev->pd);
goto free_dev;
+ }
sdev->lkey = sdev->pd->local_dma_lkey;
@@ -3138,6 +3138,7 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->cm_id)) {
pr_info("ib_create_cm_id() failed: %ld\n",
PTR_ERR(sdev->cm_id));
+ ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
goto err_ring;
@@ -3182,7 +3183,8 @@ static void srpt_add_one(struct ib_device *device)
mutex_init(&sport->port_gid_id.mutex);
INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
- if (srpt_refresh_port(sport)) {
+ ret = srpt_refresh_port(sport);
+ if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
goto err_event;
@@ -3193,10 +3195,9 @@ static void srpt_add_one(struct ib_device *device)
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
-out:
ib_set_client_data(device, &srpt_client, sdev);
pr_debug("added %s.\n", dev_name(&device->dev));
- return;
+ return 0;
err_event:
ib_unregister_event_handler(&sdev->event_handler);
@@ -3208,10 +3209,8 @@ err_ring:
ib_dealloc_pd(sdev->pd);
free_dev:
kfree(sdev);
-err:
- sdev = NULL;
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
- goto out;
+ return ret;
}
/**
@@ -3224,12 +3223,6 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
struct srpt_device *sdev = client_data;
int i;
- if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__,
- dev_name(&device->dev));
- return;
- }
-
srpt_unregister_mad_agent(sdev);
ib_unregister_event_handler(&sdev->event_handler);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 2e1a69840857..f31c349d07a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -105,11 +105,6 @@ enum {
SRP_CMD_ACA = 0x4,
SRPT_DEF_SG_TABLESIZE = 128,
- /*
- * An experimentally determined value that avoids that QP creation
- * fails due to "swiotlb buffer is full" on systems using the swiotlb.
- */
- SRPT_MAX_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,