author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 14:54:53 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 14:54:53 -0800
commit     ad0835a93008e5901415a0a27847d6a27649aa3a (patch)
tree       e48be396ebfbb4f1fb02e7ca76461bdb1427490d /drivers/infiniband/hw/i40iw
parent     Merge branch 'for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup (diff)
parent     RDMA/core: Rename kernel modify_cq to better describe its usage (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford:
 "This is a fairly plain pull request. Lots of driver updates across the
  stack, a huge number of static analysis cleanups including a close to 50
  patch series from Bart Van Assche, and a number of new features inside
  the stack such as general CQ moderation support.

  Nothing really stands out, but there might be a few conflicts as you
  take things in. In particular, the cleanups touched some of the same
  lines as the new timer_setup changes.

  Everything in this pull request has been through 0day and at least two
  days of linux-next (since Stephen doesn't necessarily flag new
  errors/warnings until day2). A few more items (about 30 patches) from
  Intel and Mellanox showed up on the list on Tuesday. I've excluded those
  from this pull request, and I'm sure some of them qualify as fixes
  suitable to send any time, but I still have to review them fully. If
  they contain mostly fixes and little or no new development, then I will
  probably send them through by the end of the week just to get them out
  of the way.

  There was a break in my acceptance of patches which coincides with the
  computer problems I had, and then when I got things mostly back under
  control I had a backlog of patches to process, which I did mostly last
  Friday and Monday. So there is a larger number of patches processed in
  that timeframe than I was striving for.

  Summary:
   - Add iWARP support to qedr driver
   - Lots of misc fixes across subsystem
   - Multiple update series to hns roce driver
   - Multiple update series to hfi1 driver
   - Updates to vnic driver
   - Add kref to wait struct in cxgb4 driver
   - Updates to i40iw driver
   - Mellanox shared pull request
   - timer_setup changes
   - massive cleanup series from Bart Van Assche
   - Two series of SRP/SRPT changes from Bart Van Assche
   - Core updates from Mellanox
   - i40iw updates
   - IPoIB updates
   - mlx5 updates
   - mlx4 updates
   - hns updates
   - bnxt_re fixes
   - PCI write padding support
   - Sparse/Smatch/warning cleanups/fixes
   - CQ moderation support
   - SRQ support in vmw_pvrdma"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (296 commits)
  RDMA/core: Rename kernel modify_cq to better describe its usage
  IB/mlx5: Add CQ moderation capability to query_device
  IB/mlx4: Add CQ moderation capability to query_device
  IB/uverbs: Add CQ moderation capability to query_device
  IB/mlx5: Exposing modify CQ callback to uverbs layer
  IB/mlx4: Exposing modify CQ callback to uverbs layer
  IB/uverbs: Allow CQ moderation with modify CQ
  iw_cxgb4: atomically flush the qp
  iw_cxgb4: only call the cq comp_handler when the cq is armed
  iw_cxgb4: Fix possible circular dependency locking warning
  RDMA/bnxt_re: report vlan_id and sl in qp1 recv completion
  IB/core: Only maintain real QPs in the security lists
  IB/ocrdma_hw: remove unnecessary code in ocrdma_mbx_dealloc_lkey
  RDMA/core: Make function rdma_copy_addr return void
  RDMA/vmw_pvrdma: Add shared receive queue support
  RDMA/core: avoid uninitialized variable warning in create_udata
  RDMA/bnxt_re: synchronize poll_cq and req_notify_cq verbs
  RDMA/bnxt_re: Flush CQ notification Work Queue before destroying QP
  RDMA/bnxt_re: Set QP state in case of response completion errors
  RDMA/bnxt_re: Add memory barriers when processing CQ/EQ entries
  ...
Diffstat (limited to 'drivers/infiniband/hw/i40iw')
-rw-r--r--  drivers/infiniband/hw/i40iw/Kconfig          1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h          3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c      30
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.h       1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c    76
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h       30
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c       3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c    48
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_p.h        3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c    19
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.h     2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_type.h    13
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c      76
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h    23
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c   30
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c   47
16 files changed, 256 insertions, 149 deletions
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index 6e7d27a14061..f6d20ba88c03 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
+ depends on PCI
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index a65e4cbdce2f..4ae9131b6350 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -119,9 +119,6 @@
#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
-#define I40IW_MTU_TO_MSS 40
-#define I40IW_DEFAULT_MSS 1460
-
struct i40iw_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 5230dd3c938c..493d6ef3d2d5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1188,7 +1188,7 @@ static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node
* i40iw_cm_timer_tick - system's timer expired callback
* @pass: Pointing to cm_core
*/
-static void i40iw_cm_timer_tick(unsigned long pass)
+static void i40iw_cm_timer_tick(struct timer_list *t)
{
unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
struct i40iw_cm_node *cm_node;
@@ -1196,10 +1196,9 @@ static void i40iw_cm_timer_tick(unsigned long pass)
struct list_head *list_core_temp;
struct i40iw_sc_vsi *vsi;
struct list_head *list_node;
- struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+ struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
u32 settimer = 0;
unsigned long timetosend;
- struct i40iw_sc_dev *dev;
unsigned long flags;
struct list_head timer_list;
@@ -1267,13 +1266,15 @@ static void i40iw_cm_timer_tick(unsigned long pass)
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
goto done;
}
- cm_node->cm_core->stats_pkt_retrans++;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
vsi = &cm_node->iwdev->vsi;
- dev = cm_node->dev;
- atomic_inc(&send_entry->sqbuf->refcount);
- i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+
+ if (!cm_node->ack_rcvd) {
+ atomic_inc(&send_entry->sqbuf->refcount);
+ i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (send_entry->send_retrans) {
send_entry->retranscount--;
@@ -1524,8 +1525,8 @@ static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool acti
break;
}
}
- if (!ret)
- clear_bit(port, cm_core->active_side_ports);
+ if (!ret)
+ clear_bit(port, cm_core->active_side_ports);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
} else {
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -2181,6 +2182,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_node->cm_id = cm_info->cm_id;
ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
atomic_set(&cm_node->ref_count, 1);
/* associate our parent CM core */
@@ -2191,7 +2193,8 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
+ cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4) :
+ (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6);
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
@@ -2406,6 +2409,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
case I40IW_CM_STATE_FIN_WAIT1:
case I40IW_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
+ /* fall through */
case I40IW_CM_STATE_TIME_WAIT:
cm_node->state = I40IW_CM_STATE_CLOSED;
i40iw_rem_ref_cm_node(cm_node);
@@ -2719,7 +2723,10 @@ static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
i40iw_handle_rcv_mpa(cm_node, rbuf);
+ } else {
+ cm_node->ack_rcvd = true;
}
break;
case I40IW_CM_STATE_LISTENING:
@@ -3195,8 +3202,7 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cm_core->connected_nodes);
INIT_LIST_HEAD(&cm_core->listen_nodes);
- setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
- (unsigned long)cm_core);
+ timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
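
The timer changes above are part of the tree-wide timer_setup() migration called out in the pull message: the callback now receives the timer_list itself and recovers its container, instead of being handed an unsigned long cookie. A minimal standalone sketch of the pattern (illustrative names, not driver code):

	#include <linux/timer.h>

	struct my_core {
		struct timer_list tcp_timer;	/* timer embedded in the owning object */
	};

	/* New-style callback: takes the timer, not an unsigned long cookie. */
	static void my_timer_tick(struct timer_list *t)
	{
		/* container_of()-based recovery of the enclosing object. */
		struct my_core *core = from_timer(core, t, tcp_timer);
		/* ... use core, as i40iw_cm_timer_tick() uses cm_core ... */
	}

	static void my_core_init(struct my_core *core)
	{
		/* Replaces setup_timer(&core->tcp_timer, fn, (unsigned long)core). */
		timer_setup(&core->tcp_timer, my_timer_tick, 0);
	}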
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 45abef76295b..0d5840d2c4fc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -360,6 +360,7 @@ struct i40iw_cm_node {
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
struct i40iw_kmem_info mpa_hdr;
+ bool ack_rcvd;
};
/* structure for client or CM to fill when making CM api calls. */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 42ca5346777d..d88c6cf47cf2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -348,7 +348,10 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
u16 qs_handle;
int i;
- vsi->mss = l2params->mss;
+ if (vsi->mtu != l2params->mtu) {
+ vsi->mtu = l2params->mtu;
+ i40iw_reinitialize_ieq(dev);
+ }
i40iw_fill_qos_list(l2params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -374,7 +377,7 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
* i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
* @qp: qp to be removed from qos
*/
-static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
struct i40iw_sc_vsi *vsi = qp->vsi;
unsigned long flags;
@@ -479,6 +482,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */
+
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
@@ -1774,6 +1781,53 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+
+ switch (info->ae_id) {
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case I40IW_AE_BAD_CLOSE:
+ case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case I40IW_AE_STAG_ZERO_INVALID:
+ case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+ case I40IW_AE_WQE_UNEXPECTED_OPCODE:
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ case I40IW_AE_DDP_NO_L_BIT:
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case I40IW_AE_INVALID_ARP_ENTRY:
+ case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+ case I40IW_AE_STALE_ARP_ENTRY:
+ case I40IW_AE_LLP_CLOSE_COMPLETE:
+ case I40IW_AE_LLP_CONNECTION_RESET:
+ case I40IW_AE_LLP_FIN_RECEIVED:
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ case I40IW_AE_LLP_SYN_RECEIVED:
+ case I40IW_AE_LLP_TERMINATE_RECEIVED:
+ case I40IW_AE_LLP_TOO_MANY_RETRIES:
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ case I40IW_AE_RESET_SENT:
+ case I40IW_AE_TERMINATE_SENT:
+ case I40IW_AE_RESET_NOT_SENT:
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ case I40IW_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ }
+
switch (ae_src) {
case I40IW_AE_SOURCE_RQ:
case I40IW_AE_SOURCE_RQ_0011:
@@ -1807,6 +1861,8 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
info->out_rdrsp = true;
break;
+ case I40IW_AE_SOURCE_RSVD:
+ /* fallthrough */
default:
break;
}
@@ -2357,7 +2413,6 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
- qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
return 0;
}
@@ -2399,7 +2454,6 @@ static enum i40iw_status_code i40iw_sc_qp_create(
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
@@ -2462,7 +2516,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
@@ -2694,7 +2747,7 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
- LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+ LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
if (info->iwarp_info_valid) {
qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
@@ -4376,10 +4429,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
break;
- case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
- (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
- break;
case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
@@ -4395,7 +4444,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
(LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
break;
case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
- case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
break;
@@ -4541,7 +4589,8 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
vsi->dev = info->dev;
vsi->back_vsi = info->back_vsi;
- vsi->mss = info->params->mss;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_queue = info->exception_lan_queue;
i40iw_fill_qos_list(info->params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -4873,6 +4922,7 @@ enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40
vsi->pestat = info->pestat;
vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
if (info->stats_initialize) {
i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
@@ -5018,14 +5068,12 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
u8 db_size;
spin_lock_init(&dev->cqp_lock);
- INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
i40iw_device_init_uk(&dev->dev_uk);
dev->debug_mask = info->debug_mask;
dev->hmc_fn_id = info->hmc_fn_id;
- dev->exception_lan_queue = info->exception_lan_queue;
dev->is_pf = info->is_pf;
dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 2ebaadbed379..65ec39e3746b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -73,6 +73,10 @@
#define I40IW_FIRST_NON_PF_STAT 4
+#define I40IW_MTU_TO_MSS_IPV4 40
+#define I40IW_MTU_TO_MSS_IPV6 60
+#define I40IW_DEFAULT_MTU 1500
+
#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
#define LS_32_1(val, bits) (u32)(val << bits)
@@ -128,6 +132,7 @@
&_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \
)
+#define I40IW_AE_SOURCE_RSVD 0x0
#define I40IW_AE_SOURCE_RQ 0x1
#define I40IW_AE_SOURCE_RQ_0011 0x3
@@ -539,9 +544,6 @@
#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
-#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53
-#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT)
-
#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \
(1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
@@ -1105,6 +1107,9 @@
#define I40IWQPC_SNDMSS_SHIFT 16
#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
+#define I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT 16
+#define I40IW_UDA_QPC_MAXFRAMESIZE_MASK (0x3fffUL << I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT)
+
#define I40IWQPC_VLANTAG_SHIFT 32
#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
@@ -1296,8 +1301,13 @@
(0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
/* wqe size considering 32 bytes per wqe*/
-#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */
-#define I40IWQP_SW_MAX_WQSIZE 2048 /* 2048 bytes */
+#define I40IW_QP_SW_MIN_WQSIZE 4 /*in WRs*/
+#define I40IW_SQ_RSVD 2
+#define I40IW_RQ_RSVD 1
+#define I40IW_MAX_QUANTAS_PER_WR 2
+#define I40IW_QP_SW_MAX_SQ_QUANTAS 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTAS 16384
+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTAS / I40IW_MAX_QUANTAS_PER_WR) - 1)
#define I40IWQP_OP_RDMA_WRITE 0
#define I40IWQP_OP_RDMA_READ 1
@@ -1636,7 +1646,8 @@ enum i40iw_alignment {
#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define I40IW_AE_AMP_WQE_INVALID_PARAMETER 0x0130
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
#define I40IW_AE_BAD_CLOSE 0x0201
#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
#define I40IW_AE_CQ_OPERATION_ERROR 0x0203
@@ -1644,12 +1655,10 @@ enum i40iw_alignment {
#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
#define I40IW_AE_STAG_ZERO_INVALID 0x0206
#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define I40IW_AE_SRQ_LIMIT 0x0209
#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a
#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b
#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220
#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID 0x0302
#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305
@@ -1663,12 +1672,10 @@ enum i40iw_alignment {
#define I40IW_AE_INVALID_ARP_ENTRY 0x0401
#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402
#define I40IW_AE_STALE_ARP_ENTRY 0x0403
-#define I40IW_AE_INVALID_WQE_LENGTH 0x0404
#define I40IW_AE_INVALID_MAC_ENTRY 0x0405
#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501
#define I40IW_AE_LLP_CONNECTION_RESET 0x0502
#define I40IW_AE_LLP_FIN_RECEIVED 0x0503
-#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506
#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507
@@ -1685,9 +1692,6 @@ enum i40iw_alignment {
#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700
#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define I40IW_AE_UDA_XMIT_FRAG_SEQ 0x0800
-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0801
-#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH 0x0802
#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900
#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1
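
The new I40IW_MTU_TO_MSS_IPV4/IPV6 constants are the standard header overheads (20-byte IPv4 or 40-byte IPv6 header, plus a 20-byte TCP header) that i40iw_make_cm_node() now subtracts from the VSI MTU per connection. A hedged sketch of the derivation (hypothetical helper, not in the driver):

	/* MSS = MTU minus IP + TCP header overhead:
	 *   IPv4: mtu - (20 + 20) = mtu - I40IW_MTU_TO_MSS_IPV4 (40)
	 *   IPv6: mtu - (40 + 20) = mtu - I40IW_MTU_TO_MSS_IPV6 (60)
	 */
	static inline u16 mtu_to_mss(u16 mtu, bool ipv4)
	{
		return ipv4 ? mtu - I40IW_MTU_TO_MSS_IPV4
			    : mtu - I40IW_MTU_TO_MSS_IPV6;
	}

So the default 1500-byte MTU yields an MSS of 1460 for IPv4 (the old I40IW_DEFAULT_MSS) and 1440 for IPv6.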
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 476867a3f584..e96bdafbcbb3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -408,8 +408,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
ctx_info->err_rq_idx_valid = false;
+ /* fall through */
default:
if (!info->sq && ctx_info->err_rq_idx_valid) {
ctx_info->err_rq_idx = info->wqe_idx;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 27590ae21881..e824296713e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -353,6 +353,8 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
i40iw_destroy_ceq(iwdev, iwceq);
}
+
+ iwdev->sc_dev.ceq_valid = false;
}
/**
@@ -810,17 +812,16 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
iwdev->ceqs_count++;
}
-
exit:
- if (status) {
- if (!iwdev->ceqs_count) {
- kfree(iwdev->ceqlist);
- iwdev->ceqlist = NULL;
- } else {
- status = 0;
- }
+ if (status && !iwdev->ceqs_count) {
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ return status;
+ } else {
+ iwdev->sc_dev.ceq_valid = true;
+ return 0;
}
- return status;
+
}
/**
@@ -958,13 +959,13 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
info.cq_id = 2;
- info.qp_id = iwdev->sc_dev.exception_lan_queue;
+ info.qp_id = iwdev->vsi.exception_lan_queue;
info.count = 1;
info.pd_id = 2;
info.sq_size = 8192;
info.rq_size = 8192;
- info.buf_size = 2048;
- info.tx_buf_cnt = 16384;
+ info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
+ info.tx_buf_cnt = 4096;
status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ieq create fail\n");
@@ -972,6 +973,21 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
}
/**
+ * i40iw_reinitialize_ieq - destroy and re-create ieq
+ * @dev: iwarp device
+ */
+void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
+ if (i40iw_initialize_ieq(iwdev)) {
+ iwdev->reset = true;
+ i40iw_request_reset(iwdev);
+ }
+}
+
+/**
* i40iw_hmc_setup - create hmc objects for the device
* @iwdev: iwarp device
*
@@ -1327,8 +1343,8 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
info.bar0 = ldev->hw_addr;
info.hw = &iwdev->hw;
info.debug_mask = debug;
- l2params.mss =
- (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
+ l2params.mtu =
+ (ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
qset = ldev->params.qos.prio_qos[i].qs_handle;
l2params.qs_handle_list[i] = qset;
@@ -1338,7 +1354,6 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
iwdev->dcb = true;
}
i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
- info.exception_lan_queue = 1;
info.vchnl_send = i40iw_virtchnl_send;
status = i40iw_device_init(&iwdev->sc_dev, &info);
@@ -1348,6 +1363,7 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
vsi_info.dev = &iwdev->sc_dev;
vsi_info.back_vsi = (void *)iwdev;
vsi_info.params = &l2params;
+ vsi_info.exception_lan_queue = 1;
i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
if (dev->is_pf) {
@@ -1748,7 +1764,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
- l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+ l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;
INIT_WORK(&work->work, i40iw_l2params_worker);
queue_work(iwdev->param_wq, &work->work);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index 5498ad01c280..11d3a2a72100 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -86,7 +86,7 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
-
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
@@ -123,5 +123,6 @@ enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
struct i40iw_virt_mem *mem);
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
+void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 59f70676f0e0..796a815b53fd 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -488,7 +488,7 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
- set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
+ set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
set_64bit_val(qp_ctx, 56, 0);
set_64bit_val(qp_ctx, 64, 1);
@@ -611,12 +611,14 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
qp->user_pri = 0;
i40iw_qp_add_qos(qp);
i40iw_puda_qp_setctx(rsrc);
- if (rsrc->ceq_valid)
+ if (rsrc->dev->ceq_valid)
ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
else
ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
- if (ret)
+ if (ret) {
+ i40iw_qp_rem_qos(qp);
i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+ }
return ret;
}
@@ -704,7 +706,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
if (ret)
goto error;
- if (rsrc->ceq_valid)
+ if (rsrc->dev->ceq_valid)
ret = i40iw_cqp_cq_create_cmd(dev, cq);
else
ret = i40iw_puda_cq_wqe(dev, cq);
@@ -724,7 +726,7 @@ static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
struct i40iw_ccq_cqe_info compl_info;
struct i40iw_sc_dev *dev = rsrc->dev;
- if (rsrc->ceq_valid) {
+ if (rsrc->dev->ceq_valid) {
i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
return;
}
@@ -757,7 +759,7 @@ static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
struct i40iw_ccq_cqe_info compl_info;
struct i40iw_sc_dev *dev = rsrc->dev;
- if (rsrc->ceq_valid) {
+ if (rsrc->dev->ceq_valid) {
i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
return;
}
@@ -813,6 +815,7 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
switch (rsrc->completion) {
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
+ /* fall through */
case PUDA_QP_CREATED:
if (!reset)
i40iw_puda_free_qp(rsrc);
@@ -921,7 +924,6 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
rsrc->xmit_complete = i40iw_ieq_tx_compl;
}
- rsrc->ceq_valid = info->ceq_valid;
rsrc->type = info->type;
rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
@@ -1400,7 +1402,8 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
pfpdu->rcv_nxt = fps;
pfpdu->fps = fps;
pfpdu->mode = true;
- pfpdu->max_fpdu_data = ieq->vsi->mss;
+ pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :
+ (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);
pfpdu->pmode_count++;
INIT_LIST_HEAD(rxlist);
i40iw_ieq_check_first_buf(buf, fps);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index dba05ce7d392..660aa3edae56 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -100,7 +100,6 @@ struct i40iw_puda_rsrc_info {
enum puda_resource_type type; /* ILQ or IEQ */
u32 count;
u16 pd_id;
- bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -125,7 +124,6 @@ struct i40iw_puda_rsrc {
enum puda_resource_type type;
u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */
u16 mss;
- bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 63118f6d5ab4..a27d392c92a2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -250,6 +250,7 @@ struct i40iw_vsi_pestat {
struct i40iw_dev_hw_stats last_read_hw_stats;
struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
struct timer_list stats_timer;
+ struct i40iw_sc_vsi *vsi;
spinlock_t lock; /* rdma stats lock */
};
@@ -380,7 +381,6 @@ struct i40iw_sc_qp {
u8 *q2_buf;
u64 qp_compl_ctx;
u16 qs_handle;
- u16 exception_lan_queue;
u16 push_idx;
u8 sq_tph_val;
u8 rq_tph_val;
@@ -459,7 +459,8 @@ struct i40iw_sc_vsi {
u32 ieq_count;
struct i40iw_virt_mem ieq_mem;
struct i40iw_puda_rsrc *ieq;
- u16 mss;
+ u16 exception_lan_queue;
+ u16 mtu;
u8 fcn_id;
bool stats_fcn_id_alloc;
struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
@@ -501,10 +502,10 @@ struct i40iw_sc_dev {
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
u32 debug_mask;
- u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
bool vchnl_up;
+ bool ceq_valid;
u8 vf_id;
wait_queue_head_t vf_reqs;
u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
@@ -534,7 +535,6 @@ struct i40iw_create_qp_info {
bool ord_valid;
bool tcp_ctx_valid;
bool cq_num_valid;
- bool static_rsrc;
bool arp_cache_idx_valid;
};
@@ -546,7 +546,6 @@ struct i40iw_modify_qp_info {
bool ord_valid;
bool tcp_ctx_valid;
bool cq_num_valid;
- bool static_rsrc;
bool arp_cache_idx_valid;
bool reset_tcp_conn;
bool remove_hash_idx;
@@ -568,13 +567,14 @@ struct i40iw_ccq_cqe_info {
struct i40iw_l2params {
u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
- u16 mss;
+ u16 mtu;
};
struct i40iw_vsi_init_info {
struct i40iw_sc_dev *dev;
void *back_vsi;
struct i40iw_l2params *params;
+ u16 exception_lan_queue;
};
struct i40iw_vsi_stats_info {
@@ -592,7 +592,6 @@ struct i40iw_device_init_info {
struct i40iw_hw *hw;
void __iomem *bar0;
enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
- u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
u32 debug_mask;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 0aadb7a0d1aa..3ec5389a81a1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -821,6 +821,18 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
pring = &qp->rq_ring;
} else {
+ if (qp->first_sq_wq) {
+ qp->first_sq_wq = false;
+ if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
+ I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ memset(info, 0, sizeof(struct i40iw_cq_poll_info));
+ return i40iw_cq_poll_completion(cq, info);
+ }
+ }
+
if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
@@ -882,8 +894,21 @@ exit:
}
/**
+ * i40iw_qp_round_up - return round up QP WQ depth
+ * @wqdepth: WQ depth in quantas to round up
+ */
+static int i40iw_qp_round_up(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
+
+/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
- * @wqdepth: depth of wq required.
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
* @shift: Returns the shift needed based on sge
@@ -893,22 +918,48 @@ exit:
* For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
* Shift of 2 otherwise (wqe size of 128 bytes).
*/
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
+void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
{
- u32 size;
-
*shift = 0;
if (sge > 1 || inline_data > 16)
*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+}
- /* check if wqdepth is multiple of 2 or not */
+/*
+ * i40iw_get_sqdepth - get SQ depth (quantas)
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ *
+ */
+enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
+{
+ *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
- if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
+ if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
+ *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
+ else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
return I40IW_ERR_INVALID_SIZE;
- size = wqdepth << *shift; /* multiple of 32 bytes count */
- if (size > I40IWQP_SW_MAX_WQSIZE)
+ return 0;
+}
+
+/*
+ * i40iw_get_rqdepth - get RQ depth (quantas)
+ * @rq_size: RQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ
+ *
+ */
+enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
+{
+ *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+
+ if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
+ *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
+ else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
return I40IW_ERR_INVALID_SIZE;
+
return 0;
}
@@ -962,9 +1013,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
return I40IW_ERR_INVALID_FRAG_COUNT;
- ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
- if (ret_code)
- return ret_code;
+ i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -988,6 +1037,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
I40IW_RING_MOVE_TAIL(qp->sq_ring);
I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
qp->swqe_polarity_deferred = 1;
qp->rwqe_polarity = 0;
@@ -997,9 +1047,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
switch (info->abi_ver) {
case 4:
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
- if (ret_code)
- return ret_code;
+ i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
break;
case 5: /* fallthrough until next ABI version */
default:
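
i40iw_qp_round_up() above is the classic bit-smearing round-up to the next power of two: after OR-ing in every right shift up to 16, all bits below the highest set bit of (wqdepth - 1) are set, so the final increment lands on a power of two. The new depth helpers then clamp the result between the minimum WQ size and the SQ/RQ quanta limits. A standalone sketch plus a worked example under assumed inputs:

	/* Sketch of the bit-smearing round-up used above. */
	static unsigned int round_up_pow2(unsigned int n)
	{
		unsigned int s;

		for (n--, s = 1; s <= 16; s *= 2)
			n |= n >> s;	/* smear the top bit downward */
		return ++n;		/* e.g. 5 -> 8, 202 -> 256 */
	}

	/* Worked example for i40iw_get_sqdepth() with sq_size = 100, shift = 1:
	 *   (100 << 1) + I40IW_SQ_RSVD = 202
	 *   rounded up to the next power of two        -> 256 quantas
	 *   256 >= (I40IW_QP_SW_MIN_WQSIZE << 1) = 8   -> no clamp
	 *   256 <= I40IW_QP_SW_MAX_SQ_QUANTAS = 2048   -> accepted
	 * so *sqdepth = 256, and i40iw_setup_kmode_qp() reports back
	 * ukinfo->sq_size = 256 >> 1 = 128 usable WRs.
	 */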
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 84be6f13b9c5..e73efc59a0ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -204,18 +204,6 @@ struct i40iw_post_inline_send {
u32 len;
};
-struct i40iw_post_send_w_inv {
- i40iw_sgl sg_list;
- u32 num_sges;
- i40iw_stag remote_stag_to_inv;
-};
-
-struct i40iw_post_inline_send_w_inv {
- void *data;
- u32 len;
- i40iw_stag remote_stag_to_inv;
-};
-
struct i40iw_rdma_write {
i40iw_sgl lo_sg_list;
u32 num_lo_sges;
@@ -257,9 +245,6 @@ struct i40iw_post_sq_info {
bool defer_flag;
union {
struct i40iw_post_send send;
- struct i40iw_post_send send_w_sol;
- struct i40iw_post_send_w_inv send_w_inv;
- struct i40iw_post_send_w_inv send_w_sol_inv;
struct i40iw_rdma_write rdma_write;
struct i40iw_rdma_read rdma_read;
struct i40iw_rdma_read rdma_read_inv;
@@ -267,9 +252,6 @@ struct i40iw_post_sq_info {
struct i40iw_inv_local_stag inv_local_stag;
struct i40iw_inline_rdma_write inline_rdma_write;
struct i40iw_post_inline_send inline_send;
- struct i40iw_post_inline_send inline_send_w_sol;
- struct i40iw_post_inline_send_w_inv inline_send_w_inv;
- struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv;
} op;
};
@@ -376,6 +358,7 @@ struct i40iw_qp_uk {
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
+ bool first_sq_wq;
bool deferred_flag;
};
@@ -442,5 +425,7 @@ enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size);
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift);
+void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);
+enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);
+enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index e52dbbb4165e..8845dba7c438 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -168,11 +168,16 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
+ if (upper_dev) {
+ struct in_device *in;
+
+ rcu_read_lock();
+ in = __in_dev_get_rcu(upper_dev);
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+ rcu_read_unlock();
+ } else {
local_ipaddr = ntohl(ifa->ifa_address);
+ }
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
@@ -870,9 +875,9 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
* i40iw_terminate_timeout - timeout happened
* @context: points to iwarp qp
*/
-static void i40iw_terminate_timeout(unsigned long context)
+static void i40iw_terminate_timeout(struct timer_list *t)
{
- struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+ struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
i40iw_terminate_done(qp, 1);
@@ -889,8 +894,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
iwqp = (struct i40iw_qp *)qp->back_qp;
i40iw_add_ref(&iwqp->ibqp);
- setup_timer(&iwqp->terminate_timer, i40iw_terminate_timeout,
- (unsigned long)iwqp);
+ timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
iwqp->terminate_timer.expires = jiffies + HZ;
add_timer(&iwqp->terminate_timer);
}
@@ -1445,11 +1449,12 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
* @vsi: pointer to the vsi structure
*/
-static void i40iw_hw_stats_timeout(unsigned long vsi)
+static void i40iw_hw_stats_timeout(struct timer_list *t)
{
- struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+ struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
+ stats_timer);
+ struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
- struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
struct i40iw_vsi_pestat *vf_devstat = NULL;
u16 iw_vf_idx;
unsigned long flags;
@@ -1480,8 +1485,7 @@ void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
struct i40iw_vsi_pestat *devstat = vsi->pestat;
- setup_timer(&devstat->stats_timer, i40iw_hw_stats_timeout,
- (unsigned long)vsi);
+ timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
mod_timer(&devstat->stats_timer,
jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}
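
The i40iw_inetaddr_event() hunk above replaces a raw upper_dev->ip_ptr dereference with the RCU-protected accessor. The general pattern for reading a net_device's first IPv4 address looks roughly like this (sketch only; the NULL checks elided in the driver hunk are added here):

	#include <linux/inetdevice.h>

	/* Sketch: read a net_device's first IPv4 address under RCU. */
	static u32 first_ipv4_addr(struct net_device *dev)
	{
		struct in_device *in;
		u32 addr = 0;

		rcu_read_lock();
		in = __in_dev_get_rcu(dev);
		if (in && in->ifa_list)
			addr = ntohl(in->ifa_list->ifa_address);
		rcu_read_unlock();
		return addr;
	}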
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 62be0a41ad0b..3c6f3ce88f89 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -69,7 +69,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
props->max_qp = iwdev->max_qp - iwdev->used_qps;
- props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+ props->max_qp_wr = I40IW_MAX_QP_WRS;
props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
@@ -381,22 +381,6 @@ static int i40iw_dealloc_pd(struct ib_pd *ibpd)
}
/**
- * i40iw_qp_roundup - return round up qp ring size
- * @wr_ring_size: ring size to round up
- */
-static int i40iw_qp_roundup(u32 wr_ring_size)
-{
- int scount = 1;
-
- if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
- wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
-
- for (wr_ring_size--; scount <= 16; scount *= 2)
- wr_ring_size |= wr_ring_size >> scount;
- return ++wr_ring_size;
-}
-
-/**
* i40iw_get_pbl - Retrieve pbl from a list given a virtual
* address
* @va: user virtual address
@@ -515,21 +499,19 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
{
struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 sqdepth, rqdepth;
- u32 sq_size, rq_size;
u8 sqshift;
u32 size;
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
- rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
-
- status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+ i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+ status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
if (status)
return -ENOMEM;
- sqdepth = sq_size << sqshift;
- rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
+ status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
+ if (status)
+ return -ENOMEM;
size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
@@ -559,8 +541,8 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sq_size;
- ukinfo->rq_size = rq_size;
+ ukinfo->sq_size = sqdepth >> sqshift;
+ ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
ukinfo->qp_id = iwqp->ibqp.qp_num;
return 0;
}
@@ -2204,6 +2186,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ukqp = &iwqp->sc_qp.qp_uk;
spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (iwqp->flush_issued) {
+ err = -EINVAL;
+ goto out;
+ }
+
while (ib_wr) {
inv_stag = false;
memset(&info, 0, sizeof(info));
@@ -2346,6 +2334,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
else
@@ -2378,6 +2367,12 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
memset(&post_recv, 0, sizeof(post_recv));
spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (iwqp->flush_issued) {
+ err = -EINVAL;
+ goto out;
+ }
+
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;