Diffstat (limited to 'drivers/infiniband/hw/i40iw/i40iw_ctrl.c')
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c  119
1 file changed, 92 insertions(+), 27 deletions(-)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 42ca5346777d..da9821a10e0d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -348,7 +348,10 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
u16 qs_handle;
int i;
- vsi->mss = l2params->mss;
+ if (vsi->mtu != l2params->mtu) {
+ vsi->mtu = l2params->mtu;
+ i40iw_reinitialize_ieq(dev);
+ }
i40iw_fill_qos_list(l2params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
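This hunk replaces the per-VSI MSS tracking with MTU and rebuilds the IEQ only when the MTU actually changes, since the IEQ buffers are sized from the MTU. A minimal sketch of how a LAN-side notifier might drive this entry point (the caller below is hypothetical; i40iw_change_l2params() and the mtu field come from the patch):

	/* Hypothetical notifier: push a new LAN MTU into the RDMA VSI.
	 * Only the mtu field matters for the IEQ rebuild above; in practice
	 * qs_handle_list would be filled in by the LAN driver as well.
	 */
	static void example_l2param_change(struct i40iw_sc_vsi *vsi, u32 new_mtu)
	{
		struct i40iw_l2params l2params = {};

		l2params.mtu = new_mtu;
		i40iw_change_l2params(vsi, &l2params);
	}
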
@@ -374,7 +377,7 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
* i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
* @qp: qp to be removed from qos
*/
-static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
struct i40iw_sc_vsi *vsi = qp->vsi;
unsigned long flags;
@@ -479,6 +482,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */
+
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
@@ -506,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
&cqp->sdbuf,
- 128,
+ I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
I40IW_SD_BUF_ALIGNMENT);
if (ret_code)
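The update-SD scratch buffer grows from a single 128-byte area to one slot per CQP SQ entry, so several update-SD commands can be in flight without sharing staging memory. A hedged helper expressing the resulting layout (the function name is hypothetical; the constant and fields are from the patch):

	/* Hypothetical helper: each CQP SQ slot owns a disjoint slice of the
	 * update-SD DMA buffer, so concurrent UPDATE_PE_SDS commands cannot
	 * overwrite each other's staging data.
	 */
	static inline void *i40iw_sd_buf_slice(struct i40iw_dma_mem *sdbuf,
					       u32 wqe_idx)
	{
		return (char *)sdbuf->va + (u64)wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
	}
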
@@ -589,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
}
/**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
*/
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+ u64 scratch, u32 *wqe_idx)
{
u64 *wqe = NULL;
- u32 wqe_idx;
enum i40iw_status_code ret_code;
if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -609,21 +617,33 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
cqp->sq_ring.size);
return NULL;
}
- I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+ I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
if (ret_code)
return NULL;
- if (!wqe_idx)
+ if (!*wqe_idx)
cqp->polarity = !cqp->polarity;
- wqe = cqp->sq_base[wqe_idx].elem;
- cqp->scratch_array[wqe_idx] = scratch;
+ wqe = cqp->sq_base[*wqe_idx].elem;
+ cqp->scratch_array[*wqe_idx] = scratch;
I40IW_CQP_INIT_WQE(wqe);
return wqe;
}
/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+ u32 wqe_idx;
+
+ return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
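With the index-returning variant in place, the original i40iw_sc_cqp_get_next_send_wqe() becomes a thin wrapper and existing callers are untouched; only cqp_sds_wqe_fill() below needs the slot number. A hypothetical caller pattern (error handling abbreviated):

	/* Hypothetical caller that needs its SQ slot number, mirroring the
	 * cqp_sds_wqe_fill() change later in this patch:
	 */
	u32 wqe_idx;
	u64 *wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);

	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* wqe_idx now selects per-slot resources, e.g. an SD buffer slice. */
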
+
+/**
* i40iw_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw
*/
@@ -1774,6 +1794,53 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+
+ switch (info->ae_id) {
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case I40IW_AE_BAD_CLOSE:
+ case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case I40IW_AE_STAG_ZERO_INVALID:
+ case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+ case I40IW_AE_WQE_UNEXPECTED_OPCODE:
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ case I40IW_AE_DDP_NO_L_BIT:
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case I40IW_AE_INVALID_ARP_ENTRY:
+ case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+ case I40IW_AE_STALE_ARP_ENTRY:
+ case I40IW_AE_LLP_CLOSE_COMPLETE:
+ case I40IW_AE_LLP_CONNECTION_RESET:
+ case I40IW_AE_LLP_FIN_RECEIVED:
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ case I40IW_AE_LLP_SYN_RECEIVED:
+ case I40IW_AE_LLP_TERMINATE_RECEIVED:
+ case I40IW_AE_LLP_TOO_MANY_RETRIES:
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ case I40IW_AE_RESET_SENT:
+ case I40IW_AE_TERMINATE_SENT:
+ case I40IW_AE_RESET_NOT_SENT:
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ case I40IW_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ }
+
switch (ae_src) {
case I40IW_AE_SOURCE_RQ:
case I40IW_AE_SOURCE_RQ_0011:
@@ -1807,6 +1874,8 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
info->out_rdrsp = true;
break;
+ case I40IW_AE_SOURCE_RSVD:
+ /* fallthrough */
default:
break;
}
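These asynchronous events are associated with a QP (or, for I40IW_AE_LCE_CQ_CATASTROPHIC, a CQ) regardless of what the hardware reports in the source field, so the patch classifies them up front and forces ae_src to I40IW_AE_SOURCE_RSVD so the source switch below leaves the already-filled info untouched. Assuming the usual i40iw_d.h definition of LS_64_1() as ((u64)(val) << (bits)), the CQ case shifts the raw completion context left one bit. A sketch of how a consumer might use the flags set here (the handler names are illustrative):

	if (info->qp)
		handle_qp_ae((struct i40iw_sc_qp *)(unsigned long)info->compl_ctx,
			     info->ae_id);
	else if (info->cq)
		handle_cq_ae((struct i40iw_sc_cq *)(unsigned long)info->compl_ctx,
			     info->ae_id);
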
@@ -2357,7 +2426,6 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
- qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
return 0;
}
@@ -2399,7 +2467,6 @@ static enum i40iw_status_code i40iw_sc_qp_create(
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
@@ -2462,7 +2529,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
@@ -2694,7 +2760,7 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
- LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+ LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
if (info->iwarp_info_valid) {
qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
@@ -3534,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
u64 *wqe;
int mem_entries, wqe_entries;
struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+ u64 offset;
+ u32 wqe_idx;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
if (!wqe)
return I40IW_ERR_RING_FULL;
@@ -3548,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
if (mem_entries) {
- memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
- data = sdbuf->pa;
+ offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+ memcpy((char *)sdbuf->va + offset, &info->entry[3],
+ mem_entries << 4);
+ data = (u64)sdbuf->pa + offset;
} else {
data = 0;
}
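Worked through with the per-slot buffer from the allocation change above: each SD entry is 16 bytes, so for, say, wqe_idx = 5 and mem_entries = 4 the copy lands at offset 5 * I40IW_UPDATE_SD_BUF_SIZE, spans 4 << 4 = 64 bytes, and the WQE's data pointer is the matching DMA address:

	/* Illustrative arithmetic for the hunk above (values are examples):
	 *   offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;  // per-slot slice
	 *   length = mem_entries << 4;                    // 16 bytes/entry
	 *   data   = sdbuf->pa + offset;                  // DMA address
	 */
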
@@ -4376,10 +4446,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
break;
- case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
- (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
- break;
case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
@@ -4395,7 +4461,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
(LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
break;
case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
- case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
break;
@@ -4541,7 +4606,8 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
vsi->dev = info->dev;
vsi->back_vsi = info->back_vsi;
- vsi->mss = info->params->mss;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_queue = info->exception_lan_queue;
i40iw_fill_qos_list(info->params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
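Together with the i40iw_sc_qp_setctx() hunk above, this moves exception_lan_queue from struct i40iw_sc_dev onto the VSI (see the matching removal in i40iw_device_init() below) and switches the init parameters from mss to mtu. A hypothetical bring-up showing the new fields (values are illustrative):

	struct i40iw_vsi_init_info info = {};

	info.dev = dev;
	info.back_vsi = back_vsi;
	info.params = &l2params;	/* now carries mtu, not mss */
	info.exception_lan_queue = 1;	/* per-VSI, no longer on the dev */
	i40iw_sc_vsi_init(&vsi, &info);
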
@@ -4873,6 +4939,7 @@ enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40
vsi->pestat = info->pestat;
vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
if (info->stats_initialize) {
i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
@@ -5018,14 +5085,12 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
u8 db_size;
spin_lock_init(&dev->cqp_lock);
- INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
i40iw_device_init_uk(&dev->dev_uk);
dev->debug_mask = info->debug_mask;
dev->hmc_fn_id = info->hmc_fn_id;
- dev->exception_lan_queue = info->exception_lan_queue;
dev->is_pf = info->is_pf;
dev->fpm_query_buf_pa = info->fpm_query_buf_pa;