author    Varun Prakash <varun@chelsio.com>                2020-07-01 22:16:10 +0530
committer Martin K. Petersen <martin.petersen@oracle.com>  2020-07-08 01:48:23 -0400
commit    f178842224b39e5979dac3a54a124d8f4c845bc0 (patch)
tree      297cde6f6d19bceb940a3680e2f81a2a8dc018b9
parent    scsi: cxgb4i: Add support for iSCSI segmentation offload (diff)
scsi: target: cxgbit: Check connection state before issuing hardware command
Current code does not check the connection state before issuing the
header/data digest offload and DDP page-size setup hardware commands.
Add a connection state check so that these hardware commands are issued
only when the connection is in the established state.

Signed-off-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
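For context, a minimal userspace sketch of the pattern the new
cxgbit_send_tcb_skb() helper implements is shown below: the state check,
the reference get, and the hardware submit all happen under one lock, so
a concurrent teardown cannot slip in between the check and the send.
Everything in the sketch (struct conn, conn_submit(), the CONN_* state
names) is an illustrative stand-in, not driver code. Build with
`cc -pthread model.c`.

/*
 * Minimal userspace model of the pattern cxgbit_send_tcb_skb() implements.
 * All names below (struct conn, conn_submit, CONN_*) are illustrative
 * stand-ins, not part of the cxgbit driver.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum conn_state { CONN_CONNECTING, CONN_ESTABLISHED, CONN_CLOSING };

struct conn {
	pthread_mutex_t lock;
	enum conn_state state;
	int refcnt;
};

/* Stand-in for cxgbit_ofld_send(): the device consumes the buffer. */
static void hw_send(struct conn *c, void *buf)
{
	(void)c;
	printf("submitted %p to hardware\n", buf);
	free(buf);
}

/*
 * Mirrors the new helper: check the state, take a reference, and submit,
 * all while holding the lock, so a concurrent close cannot invalidate
 * the connection between the check and the send.
 */
static int conn_submit(struct conn *c, void *buf)
{
	pthread_mutex_lock(&c->lock);
	if (c->state != CONN_ESTABLISHED) {
		pthread_mutex_unlock(&c->lock);
		free(buf);	/* drop the buffer, as __kfree_skb() does */
		return -1;
	}
	c->refcnt++;		/* like cxgbit_get_csk(): hold a ref until the reply */
	hw_send(c, buf);
	pthread_mutex_unlock(&c->lock);
	return 0;
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, CONN_ESTABLISHED, 1 };

	conn_submit(&c, malloc(64));	/* established: buffer reaches "hardware" */

	c.state = CONN_CLOSING;		/* a teardown flips the state under the lock */
	if (conn_submit(&c, malloc(64)) < 0)
		fprintf(stderr, "rejected: connection no longer established\n");
	return 0;
}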
-rw-r--r--	drivers/target/iscsi/cxgbit/cxgbit_cm.c	32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index a2b5c796bbc4..493070cedbc7 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1485,6 +1485,26 @@ u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
 	return flowclen16;
 }
 
+static int
+cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	spin_lock_bh(&csk->lock);
+	if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
+		spin_unlock_bh(&csk->lock);
+		pr_err("%s: csk 0x%p, tid %u, state %u\n",
+		       __func__, csk, csk->tid, csk->com.state);
+		__kfree_skb(skb);
+		return -1;
+	}
+
+	cxgbit_get_csk(csk);
+	cxgbit_init_wr_wait(&csk->com.wr_wait);
+	cxgbit_ofld_send(csk->com.cdev, skb);
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+}
+
 int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
 {
 	struct sk_buff *skb;
@@ -1510,10 +1530,8 @@ int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
 			       (dcrc ? ULP_CRC_DATA : 0)) << 4);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
 
-	cxgbit_get_csk(csk);
-	cxgbit_init_wr_wait(&csk->com.wr_wait);
-
-	cxgbit_ofld_send(csk->com.cdev, skb);
+	if (cxgbit_send_tcb_skb(csk, skb))
+		return -1;
 
 	ret = cxgbit_wait_for_reply(csk->com.cdev,
 				    &csk->com.wr_wait,
@@ -1545,10 +1563,8 @@ int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
 	req->val = cpu_to_be64(pg_idx << 8);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
 
-	cxgbit_get_csk(csk);
-	cxgbit_init_wr_wait(&csk->com.wr_wait);
-
-	cxgbit_ofld_send(csk->com.cdev, skb);
+	if (cxgbit_send_tcb_skb(csk, skb))
+		return -1;
 
 	ret = cxgbit_wait_for_reply(csk->com.cdev,
 				    &csk->com.wr_wait,
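
A note on the design choice, as an observation rather than something the
patch states: keeping cxgbit_ofld_send() inside the csk->lock critical
section makes the CSK_STATE_ESTABLISHED check atomic with respect to
state transitions, so a teardown running concurrently cannot invalidate
the tid between the check and the submission. The early return of -1
presumably folds into the callers' existing error handling, since both
cxgbit_setup_conn_digest() and cxgbit_setup_conn_pgidx() could already
fail via cxgbit_wait_for_reply(); the pr_err() records the tid and state
to help diagnose commands rejected on a closing connection.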