Diffstat (limited to 'drivers/infiniband/ulp/iser'):

 drivers/infiniband/ulp/iser/iscsi_iser.c     |  12
 drivers/infiniband/ulp/iser/iscsi_iser.h     |  64
 drivers/infiniband/ulp/iser/iser_initiator.c |  12
 drivers/infiniband/ulp/iser/iser_memory.c    | 121
 drivers/infiniband/ulp/iser/iser_verbs.c     | 156
 5 files changed, 126 insertions(+), 239 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9c185a8dabd3..c7a3d75fb308 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -205,7 +205,8 @@ iser_initialize_task_headers(struct iscsi_task *task,
goto out;
}
- tx_desc->wr_idx = 0;
+ tx_desc->inv_wr.next = NULL;
+ tx_desc->reg_wr.wr.next = NULL;
tx_desc->mapped = true;
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
@@ -406,13 +407,10 @@ static u8
iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
+ ISER_DIR_IN : ISER_DIR_OUT;
- if (iser_task->dir[ISER_DIR_IN])
- return iser_check_task_pi_status(iser_task, ISER_DIR_IN,
- sector);
- else
- return iser_check_task_pi_status(iser_task, ISER_DIR_OUT,
- sector);
+ return iser_check_task_pi_status(iser_task, dir, sector);
}
/**
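
Note: the two added lines that set inv_wr.next and reg_wr.wr.next to NULL replace the old wr_idx counter; a non-NULL ->next is what later tells iser_post_send (see the iser_verbs.c hunks at the end of this patch) which WR heads the per-task chain. A condensed sketch of that selection (not literal patch text, surrounding function omitted):

    struct ib_send_wr *first_wr;

    if (tx_desc->inv_wr.next)               /* LOCAL_INV was queued */
            first_wr = &tx_desc->inv_wr;
    else if (tx_desc->reg_wr.wr.next)       /* REG_MR(_INTEGRITY) was queued */
            first_wr = &tx_desc->reg_wr.wr;
    else
            first_wr = &tx_desc->send_wr;   /* plain send */

    ib_post_send(ib_conn->qp, first_wr, NULL);
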
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 36d525110fd2..39bf213444cb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -225,14 +225,6 @@ enum iser_desc_type {
ISCSI_TX_DATAOUT
};
-/* Maximum number of work requests per task:
- * Data memory region local invalidate + fast registration
- * Protection memory region local invalidate + fast registration
- * Signature memory region local invalidate + fast registration
- * PDU send
- */
-#define ISER_MAX_WRS 7
-
/**
* struct iser_tx_desc - iSER TX descriptor
*
@@ -245,11 +237,9 @@ enum iser_desc_type {
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
* @mapped: Is the task header mapped
- * @wr_idx: Current WR index
- * @wrs: Array of WRs per task
- * @data_reg: Data buffer registration details
- * @prot_reg: Protection buffer registration details
- * @sig_attrs: Signature attributes
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
*/
struct iser_tx_desc {
struct iser_ctrl iser_header;
@@ -260,15 +250,9 @@ struct iser_tx_desc {
int num_sge;
struct ib_cqe cqe;
bool mapped;
- u8 wr_idx;
- union iser_wr {
- struct ib_send_wr send;
- struct ib_reg_wr fast_reg;
- struct ib_sig_handover_wr sig;
- } wrs[ISER_MAX_WRS];
- struct iser_mem_reg data_reg;
- struct iser_mem_reg prot_reg;
- struct ib_sig_attrs sig_attrs;
+ struct ib_reg_wr reg_wr;
+ struct ib_send_wr send_wr;
+ struct ib_send_wr inv_wr;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
@@ -388,6 +372,7 @@ struct iser_device {
*
* @mr: memory region
* @fmr_pool: pool of fmrs
+ * @sig_mr: signature memory region
* @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
@@ -396,36 +381,22 @@ struct iser_reg_resources {
struct ib_mr *mr;
struct ib_fmr_pool *fmr_pool;
};
+ struct ib_mr *sig_mr;
struct iser_page_vec *page_vec;
u8 mr_valid:1;
};
/**
- * struct iser_pi_context - Protection information context
- *
- * @rsc: protection buffer registration resources
- * @sig_mr: signature enable memory region
- * @sig_mr_valid: is sig_mr valid indicator
- * @sig_protected: is region protected indicator
- */
-struct iser_pi_context {
- struct iser_reg_resources rsc;
- struct ib_mr *sig_mr;
- u8 sig_mr_valid:1;
- u8 sig_protected:1;
-};
-
-/**
* struct iser_fr_desc - Fast registration descriptor
*
* @list: entry in connection fastreg pool
* @rsc: data buffer registration resources
- * @pi_ctx: protection information context
+ * @sig_protected: is region protected indicator
*/
struct iser_fr_desc {
struct list_head list;
struct iser_reg_resources rsc;
- struct iser_pi_context *pi_ctx;
+ bool sig_protected;
struct list_head all_list;
};
@@ -674,21 +645,6 @@ void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc);
-static inline struct ib_send_wr *
-iser_tx_next_wr(struct iser_tx_desc *tx_desc)
-{
- struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
- struct ib_send_wr *last_wr;
-
- if (tx_desc->wr_idx) {
- last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
- last_wr->next = cur_wr;
- }
- tx_desc->wr_idx++;
-
- return cur_wr;
-}
-
static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
{
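
Note: with the fixed inv_wr/reg_wr/send_wr fields added above, the removed iser_tx_next_wr() helper has no users left; the registration paths in iser_memory.c (below) link the work requests directly. A condensed sketch of the chain they build when a previous registration must first be invalidated (not literal patch text):

    /* in iser_fast_reg_mr() / iser_reg_sig_mr(), condensed */
    if (rsc->mr_valid)
            /* LOCAL_INV -> REG_MR */
            iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &tx_desc->reg_wr.wr);
    ...
    tx_desc->reg_wr.wr.next = &tx_desc->send_wr;    /* REG_MR -> SEND */
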
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 96af06cfe0af..5cbb4b3a0566 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -592,15 +592,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
- if (likely(rkey == desc->rsc.mr->rkey)) {
- desc->rsc.mr_valid = 0;
- } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
- desc->pi_ctx->sig_mr_valid = 0;
- } else {
+ if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
+ (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
return -EINVAL;
}
+ desc->rsc.mr_valid = 0;
+
return 0;
}
@@ -750,6 +749,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
iser_task->prot[ISER_DIR_IN].data_len = 0;
iser_task->prot[ISER_DIR_OUT].data_len = 0;
+ iser_task->prot[ISER_DIR_IN].dma_nents = 0;
+ iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
+
memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
sizeof(struct iser_mem_reg));
memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2ba70729d7b0..2cc89a9b9e9b 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -302,8 +302,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
}
static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
- struct ib_sig_domain *domain)
+iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -326,21 +325,21 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_STRIP:
sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+ iser_set_dif_domain(sc, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+ iser_set_dif_domain(sc, &sig_attrs->mem);
sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+ iser_set_dif_domain(sc, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
- iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+ iser_set_dif_domain(sc, &sig_attrs->mem);
sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
@@ -366,27 +365,29 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
static inline void
iser_inv_rkey(struct ib_send_wr *inv_wr,
struct ib_mr *mr,
- struct ib_cqe *cqe)
+ struct ib_cqe *cqe,
+ struct ib_send_wr *next_wr)
{
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_cqe = cqe;
inv_wr->ex.invalidate_rkey = mr->rkey;
inv_wr->send_flags = 0;
inv_wr->num_sge = 0;
+ inv_wr->next = next_wr;
}
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct iser_pi_context *pi_ctx,
- struct iser_mem_reg *data_reg,
- struct iser_mem_reg *prot_reg,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *sig_mem,
+ struct iser_reg_resources *rsc,
struct iser_mem_reg *sig_reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
- struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
- struct ib_sig_handover_wr *wr;
- struct ib_mr *mr = pi_ctx->sig_mr;
+ struct ib_mr *mr = rsc->sig_mr;
+ struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
int ret;
memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -396,33 +397,36 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
- if (pi_ctx->sig_mr_valid)
- iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+ if (rsc->mr_valid)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
- wr);
- wr->wr.opcode = IB_WR_REG_SIG_MR;
+ ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
+ sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
+ if (unlikely(ret)) {
+ iser_err("failed to map PI sg (%d)\n",
+ mem->dma_nents + sig_mem->dma_nents);
+ goto err;
+ }
+
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.next = &tx_desc->send_wr;
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
wr->wr.wr_cqe = cqe;
- wr->wr.sg_list = &data_reg->sge;
- wr->wr.num_sge = 1;
+ wr->wr.num_sge = 0;
wr->wr.send_flags = 0;
- wr->sig_attrs = sig_attrs;
- wr->sig_mr = mr;
- if (scsi_prot_sg_count(iser_task->sc))
- wr->prot = &prot_reg->sge;
- else
- wr->prot = NULL;
- wr->access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
- pi_ctx->sig_mr_valid = 1;
+ wr->mr = mr;
+ wr->key = mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ rsc->mr_valid = 1;
sig_reg->sge.lkey = mr->lkey;
sig_reg->rkey = mr->rkey;
- sig_reg->sge.addr = 0;
- sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
+ sig_reg->sge.addr = mr->iova;
+ sig_reg->sge.length = mr->length;
iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
@@ -439,11 +443,11 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
struct ib_mr *mr = rsc->mr;
- struct ib_reg_wr *wr;
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
int n;
if (rsc->mr_valid)
- iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
@@ -454,7 +458,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return n < 0 ? n : -EINVAL;
}
- wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
+ wr->wr.next = &tx_desc->send_wr;
wr->wr.opcode = IB_WR_REG_MR;
wr->wr.wr_cqe = cqe;
wr->wr.send_flags = 0;
@@ -479,21 +483,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
}
static int
-iser_reg_prot_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc,
- bool use_dma_key,
- struct iser_mem_reg *reg)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- if (use_dma_key)
- return iser_reg_dma(device, mem, reg);
-
- return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
-}
-
-static int
iser_reg_data_sg(struct iscsi_iser_task *task,
struct iser_data_buf *mem,
struct iser_fr_desc *desc,
@@ -516,7 +505,6 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
- struct iser_mem_reg *data_reg;
struct iser_fr_desc *desc = NULL;
bool use_dma_key;
int err;
@@ -529,32 +517,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
reg->mem_h = desc;
}
- if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
- data_reg = reg;
- else
- data_reg = &task->desc.data_reg;
-
- err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
- if (unlikely(err))
- goto err_reg;
-
- if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
- struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
-
- if (scsi_prot_sg_count(task->sc)) {
- mem = &task->prot[dir];
- err = iser_reg_prot_sg(task, mem, desc,
- use_dma_key, prot_reg);
- if (unlikely(err))
- goto err_reg;
- }
-
- err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
- prot_reg, reg);
+ if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
+ err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
+ if (unlikely(err))
+ goto err_reg;
+ } else {
+ err = iser_reg_sig_mr(task, mem, &task->prot[dir],
+ &desc->rsc, reg);
if (unlikely(err))
goto err_reg;
- desc->pi_ctx->sig_protected = 1;
+ desc->sig_protected = 1;
}
return 0;
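
Note: the hunks above are the heart of the conversion. Instead of registering data and protection buffers separately and gluing them together with an IB_WR_REG_SIG_MR handover WR, both scatterlists are mapped onto the single integrity MR with ib_map_mr_sg_pi() and registered by one IB_WR_REG_MR_INTEGRITY WR. A condensed sketch of the new iser_reg_sig_mr() flow (error handling and iSER bookkeeping omitted; not literal patch text):

    struct ib_mr *mr = rsc->sig_mr;
    struct ib_reg_wr *wr = &tx_desc->reg_wr;

    iser_set_sig_attrs(iser_task->sc, mr->sig_attrs);   /* T10-PI domains */
    ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

    ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
                          sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
    if (unlikely(ret))
            return ret;

    wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
    wr->mr = mr;
    wr->key = mr->rkey;
    /* the registered region is advertised via mr->iova / mr->length */
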
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4ff3d98fa6a4..a6548de0e218 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -233,116 +233,63 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
kfree(desc);
}
-static int
-iser_alloc_reg_res(struct iser_device *device,
- struct ib_pd *pd,
- struct iser_reg_resources *res,
- unsigned int size)
+static struct iser_fr_desc *
+iser_create_fastreg_desc(struct iser_device *device,
+ struct ib_pd *pd,
+ bool pi_enable,
+ unsigned int size)
{
+ struct iser_fr_desc *desc;
struct ib_device *ib_dev = device->ib_device;
enum ib_mr_type mr_type;
int ret;
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
mr_type = IB_MR_TYPE_SG_GAPS;
else
mr_type = IB_MR_TYPE_MEM_REG;
- res->mr = ib_alloc_mr(pd, mr_type, size);
- if (IS_ERR(res->mr)) {
- ret = PTR_ERR(res->mr);
+ desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
+ if (IS_ERR(desc->rsc.mr)) {
+ ret = PTR_ERR(desc->rsc.mr);
iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
- return ret;
+ goto err_alloc_mr;
}
- res->mr_valid = 0;
-
- return 0;
-}
-static void
-iser_free_reg_res(struct iser_reg_resources *rsc)
-{
- ib_dereg_mr(rsc->mr);
-}
-
-static int
-iser_alloc_pi_ctx(struct iser_device *device,
- struct ib_pd *pd,
- struct iser_fr_desc *desc,
- unsigned int size)
-{
- struct iser_pi_context *pi_ctx = NULL;
- int ret;
-
- desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!desc->pi_ctx)
- return -ENOMEM;
-
- pi_ctx = desc->pi_ctx;
-
- ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
- if (ret) {
- iser_err("failed to allocate reg_resources\n");
- goto alloc_reg_res_err;
- }
-
- pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
- if (IS_ERR(pi_ctx->sig_mr)) {
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto sig_mr_failure;
+ if (pi_enable) {
+ desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
+ if (IS_ERR(desc->rsc.sig_mr)) {
+ ret = PTR_ERR(desc->rsc.sig_mr);
+ iser_err("Failed to allocate sig_mr err=%d\n", ret);
+ goto err_alloc_mr_integrity;
+ }
}
- pi_ctx->sig_mr_valid = 0;
- desc->pi_ctx->sig_protected = 0;
-
- return 0;
+ desc->rsc.mr_valid = 0;
-sig_mr_failure:
- iser_free_reg_res(&pi_ctx->rsc);
-alloc_reg_res_err:
- kfree(desc->pi_ctx);
+ return desc;
- return ret;
-}
+err_alloc_mr_integrity:
+ ib_dereg_mr(desc->rsc.mr);
+err_alloc_mr:
+ kfree(desc);
-static void
-iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
-{
- iser_free_reg_res(&pi_ctx->rsc);
- ib_dereg_mr(pi_ctx->sig_mr);
- kfree(pi_ctx);
+ return ERR_PTR(ret);
}
-static struct iser_fr_desc *
-iser_create_fastreg_desc(struct iser_device *device,
- struct ib_pd *pd,
- bool pi_enable,
- unsigned int size)
+static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
{
- struct iser_fr_desc *desc;
- int ret;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
-
- ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
- if (ret)
- goto reg_res_alloc_failure;
+ struct iser_reg_resources *res = &desc->rsc;
- if (pi_enable) {
- ret = iser_alloc_pi_ctx(device, pd, desc, size);
- if (ret)
- goto pi_ctx_alloc_failure;
+ ib_dereg_mr(res->mr);
+ if (res->sig_mr) {
+ ib_dereg_mr(res->sig_mr);
+ res->sig_mr = NULL;
}
-
- return desc;
-
-pi_ctx_alloc_failure:
- iser_free_reg_res(&desc->rsc);
-reg_res_alloc_failure:
kfree(desc);
-
- return ERR_PTR(ret);
}
/**
@@ -399,10 +346,7 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
list_del(&desc->all_list);
- iser_free_reg_res(&desc->rsc);
- if (desc->pi_ctx)
- iser_free_pi_ctx(desc->pi_ctx);
- kfree(desc);
+ iser_destroy_fastreg_desc(desc);
++i;
}
@@ -455,7 +399,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
init_attr.qp_type = IB_QPT_RC;
if (ib_conn->pi_support) {
init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
- init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
iser_conn->max_cmds =
ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
} else {
@@ -707,6 +651,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
struct ib_device_attr *attr = &device->ib_device->attrs;
unsigned short sg_tablesize, sup_sg_tablesize;
unsigned short reserved_mr_pages;
+ u32 max_num_sg;
/*
* FRs without SG_GAPS or FMRs can only map up to a (device) page per
@@ -720,12 +665,17 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
else
reserved_mr_pages = 1;
+ if (iser_conn->ib_conn.pi_support)
+ max_num_sg = attr->max_pi_fast_reg_page_list_len;
+ else
+ max_num_sg = attr->max_fast_reg_page_list_len;
+
sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- attr->max_fast_reg_page_list_len - reserved_mr_pages);
+ max_num_sg - reserved_mr_pages);
else
sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
@@ -762,7 +712,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
/* connection T10-PI support */
if (iser_pi_enable) {
if (!(device->ib_device->attrs.device_cap_flags &
- IB_DEVICE_SIGNATURE_HANDOVER)) {
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
dev_name(&ib_conn->device->ib_device->dev));
@@ -1087,7 +1037,8 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
{
- struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
+ struct ib_send_wr *wr = &tx_desc->send_wr;
+ struct ib_send_wr *first_wr;
int ib_ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
@@ -1101,7 +1052,14 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->opcode = IB_WR_SEND;
wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
- ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
+ if (tx_desc->inv_wr.next)
+ first_wr = &tx_desc->inv_wr;
+ else if (tx_desc->reg_wr.wr.next)
+ first_wr = &tx_desc->reg_wr.wr;
+ else
+ first_wr = wr;
+
+ ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
if (ib_ret)
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ib_ret, wr->opcode);
@@ -1118,9 +1076,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
struct ib_mr_status mr_status;
int ret;
- if (desc && desc->pi_ctx->sig_protected) {
- desc->pi_ctx->sig_protected = 0;
- ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+ if (desc && desc->sig_protected) {
+ desc->sig_protected = 0;
+ ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
pr_err("ib_check_mr_status failed, ret %d\n", ret);
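
Note: taken together, the patch moves iSER from the old signature-handover API onto the generic RDMA integrity API. A condensed view of the calls it now relies on across a connection's lifetime (iSER structures and error paths omitted; not literal patch text):

    /* connection setup: integrity-enabled QP, per-descriptor integrity MR */
    init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
    desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);

    /* per protected I/O: map data + protection SGs, register with one WR */
    ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
                    prot_sg, prot_nents, NULL, SZ_4K);
    reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;

    /* command completion: read back the T10-PI check result */
    ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);

    /* teardown */
    ib_dereg_mr(desc->rsc.sig_mr);
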