Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_ll2.c')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_ll2.c | 326
1 file changed, 187 insertions(+), 139 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 037e5978787e..ed274f033626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -54,6 +28,7 @@
 #include "qed_cxt.h"
 #include "qed_dev_api.h"
 #include "qed_hsi.h"
+#include "qed_iro_hsi.h"
 #include "qed_hw.h"
 #include "qed_int.h"
 #include "qed_ll2.h"
@@ -69,6 +44,8 @@
 #define QED_LL2_TX_SIZE (256)
 #define QED_LL2_RX_SIZE (4096)
 
+#define QED_LL2_INVALID_STATS_ID	0xff
+
 struct qed_cb_ll2_info {
 	int rx_cnt;
 	u32 rx_size;
@@ -88,6 +65,29 @@ struct qed_ll2_buffer {
 	dma_addr_t phys_addr;
 };
 
+static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
+				     u8 ll2_queue_type, u8 qid)
+{
+	u8 stats_id;
+
+	/* For legacy (RAM based) queues, the stats_id will be set as the
+	 * queue_id. Otherwise (context based queue), it will be set to
+	 * the "abs_pf_id" offset from the end of the RAM based queue IDs.
+	 * If the final value exceeds the total counters amount, return
+	 * INVALID value to indicate that the stats for this connection should
+	 * be disabled.
+	 */
+	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+		stats_id = qid;
+	else
+		stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
+
+	if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
+		return stats_id;
+	else
+		return QED_LL2_INVALID_STATS_ID;
+}
+
 static void qed_ll2b_complete_tx_packet(void *cxt,
 					u8 connection_handle,
 					void *cookie,
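The qed_ll2_handle_to_stats_id() helper added above partitions the TX stats counter space: a legacy (RAM based) queue keeps its queue ID as its counter index, while a context based queue is placed past the whole RAM queue range at an offset of the PF's absolute ID. A standalone C sketch of that mapping follows; the two MAX_* limits here are illustrative placeholders, not the real values the driver takes from its HSI headers.

#include <stdint.h>
#include <stdio.h>

/* Placeholder limits; the driver gets the real ones from the qed HSI headers. */
#define MAX_NUM_LL2_RX_RAM_QUEUES	32
#define MAX_NUM_LL2_TX_STATS_COUNTERS	48
#define QED_LL2_INVALID_STATS_ID	0xff

enum ll2_rx_type { LL2_RX_TYPE_LEGACY, LL2_RX_TYPE_CTX };

static uint8_t ll2_stats_id(enum ll2_rx_type type, uint8_t qid, uint8_t abs_pf_id)
{
	/* Legacy queues reuse the queue ID; context based queues are offset
	 * past the entire RAM queue range by the absolute PF ID.
	 */
	uint8_t stats_id = (type == LL2_RX_TYPE_LEGACY) ?
			   qid : MAX_NUM_LL2_RX_RAM_QUEUES + abs_pf_id;

	return stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS ?
	       stats_id : QED_LL2_INVALID_STATS_ID;
}

int main(void)
{
	printf("legacy qid 5     -> %u\n", ll2_stats_id(LL2_RX_TYPE_LEGACY, 5, 0));
	printf("ctx queue, PF 3  -> %u\n", ll2_stats_id(LL2_RX_TYPE_CTX, 0, 3));
	printf("ctx queue, PF 20 -> 0x%x (stats disabled)\n",
	       ll2_stats_id(LL2_RX_TYPE_CTX, 0, 20));
	return 0;
}

A connection that maps to QED_LL2_INVALID_STATS_ID simply runs with tx_stats_en cleared, as the establish path further down shows.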
@@ -132,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
 }
 
 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
-				 struct qed_ll2_buffer *buffer)
+				  struct qed_ll2_buffer *buffer)
 {
 	spin_lock_bh(&cdev->ll2->lock);
 
@@ -353,6 +353,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 	unsigned long flags;
 	int rc = -EINVAL;
 
+	if (!p_ll2_conn)
+		return rc;
+
 	spin_lock_irqsave(&p_tx->lock, flags);
 	if (p_tx->b_completing_packet) {
 		rc = -EBUSY;
@@ -375,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 		num_bds_in_packet = p_pkt->bd_used;
 		list_del(&p_pkt->list_entry);
 
-		if (num_bds < num_bds_in_packet) {
+		if (unlikely(num_bds < num_bds_in_packet)) {
 			DP_NOTICE(p_hwfn,
 				  "Rest of BDs does not cover whole packet\n");
 			goto out;
@@ -485,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
 	if (!list_empty(&p_rx->active_descq))
 		p_pkt = list_first_entry(&p_rx->active_descq,
 					 struct qed_ll2_rx_packet, list_entry);
-	if (!p_pkt) {
+	if (unlikely(!p_pkt)) {
 		DP_NOTICE(p_hwfn,
 			  "[%d] LL2 Rx completion but active_descq is empty\n",
 			  p_ll2_conn->input.conn_type);
@@ -498,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
 		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
 	else
 		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
-	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+	if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
 		DP_NOTICE(p_hwfn,
 			  "Mismatch between active_descq and the LL2 Rx chain\n");
 
@@ -526,7 +529,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 	unsigned long flags = 0;
 	int rc = 0;
 
+	if (!p_ll2_conn)
+		return rc;
+
 	spin_lock_irqsave(&p_rx->lock, flags);
+
+	if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+		spin_unlock_irqrestore(&p_rx->lock, flags);
+		return 0;
+	}
+
 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
 
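The two guards added to qed_ll2_rxq_completion() handle a completion racing against connection teardown: the cookie may already be NULL, and the RX-registered flag is only trustworthy once the queue lock is held. The toy pthread model below shows the shape of that check-under-lock pattern; the types and names are illustrative, not the driver's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* A completion handler that may race with teardown must re-check the
 * "registered" flag only after it holds the queue lock.
 */
struct rx_queue {
	pthread_mutex_t lock;
	bool registered;
	int pending;
};

static int rxq_completion(struct rx_queue *rxq)
{
	if (!rxq)			/* handler fired for a dead cookie */
		return 0;

	pthread_mutex_lock(&rxq->lock);
	if (!rxq->registered) {		/* torn down while we were queued */
		pthread_mutex_unlock(&rxq->lock);
		return 0;
	}
	rxq->pending--;			/* safe: teardown waits on the lock */
	pthread_mutex_unlock(&rxq->lock);
	return 1;
}

int main(void)
{
	struct rx_queue rxq = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

	printf("processed=%d\n", rxq_completion(&rxq));
	rxq.registered = false;
	printf("processed=%d\n", rxq_completion(&rxq));
	return 0;
}

Checking the flag before taking the lock would leave a window in which teardown finishes between the check and the lock acquisition.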
@@ -611,18 +623,18 @@ static bool
 qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
 				struct core_rx_slow_path_cqe *p_cqe)
 {
-	struct ooo_opaque *iscsi_ooo;
+	struct ooo_opaque *ooo_opq;
 	u32 cid;
 
 	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
 		return false;
 
-	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
-	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+	ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
+	if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
 		return false;
 
 	/* Need to make a flush */
-	cid = le32_to_cpu(iscsi_ooo->cid);
+	cid = le32_to_cpu(ooo_opq->cid);
 	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
 
 	return true;
@@ -638,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 	union core_rx_cqe_union *cqe = NULL;
 	u16 cq_new_idx = 0, cq_old_idx = 0;
 	struct qed_ooo_buffer *p_buffer;
-	struct ooo_opaque *iscsi_ooo;
+	struct ooo_opaque *ooo_opq;
 	u8 placement_offset = 0;
 	u8 cqe_type;
 
@@ -659,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 						  &cqe->rx_cqe_sp))
 				continue;
 
-		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+		if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
 			DP_NOTICE(p_hwfn,
 				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
 				  cqe_type);
@@ -671,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
 		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
 		vlan = le16_to_cpu(p_cqe_fp->vlan);
-		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
-		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
-					   iscsi_ooo);
-		cid = le32_to_cpu(iscsi_ooo->cid);
+		ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
+		cid = le32_to_cpu(ooo_opq->cid);
 
 		/* Process delete isle first */
-		if (iscsi_ooo->drop_size)
+		if (ooo_opq->drop_size)
 			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
-					     iscsi_ooo->drop_isle,
-					     iscsi_ooo->drop_size);
+					     ooo_opq->drop_isle,
+					     ooo_opq->drop_size);
 
-		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+		if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
 			continue;
 
 		/* Now process create/add/join isles */
-		if (list_empty(&p_rx->active_descq)) {
+		if (unlikely(list_empty(&p_rx->active_descq))) {
 			DP_NOTICE(p_hwfn,
 				  "LL2 OOO RX chain has no submitted buffers\n"
 				  );
@@ -696,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 		p_pkt = list_first_entry(&p_rx->active_descq,
 					 struct qed_ll2_rx_packet, list_entry);
 
-		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
-		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
-		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
-		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
-		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
-			if (!p_pkt) {
+		if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
+			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
+			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
+			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
+			   ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
+			if (unlikely(!p_pkt)) {
 				DP_NOTICE(p_hwfn,
 					  "LL2 OOO RX packet is not valid\n");
 				return -EIO;
@@ -715,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 			qed_chain_consume(&p_rx->rxq_chain);
 			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-			switch (iscsi_ooo->ooo_opcode) {
+			switch (ooo_opq->ooo_opcode) {
 			case TCP_EVENT_ADD_NEW_ISLE:
 				qed_ooo_add_new_isle(p_hwfn,
 						     p_hwfn->p_ooo_info,
 						     cid,
-						     iscsi_ooo->ooo_isle,
+						     ooo_opq->ooo_isle,
 						     p_buffer);
 				break;
 			case TCP_EVENT_ADD_ISLE_RIGHT:
 				qed_ooo_add_new_buffer(p_hwfn,
 						       p_hwfn->p_ooo_info,
 						       cid,
-						       iscsi_ooo->ooo_isle,
+						       ooo_opq->ooo_isle,
 						       p_buffer,
 						       QED_OOO_RIGHT_BUF);
 				break;
@@ -735,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 				qed_ooo_add_new_buffer(p_hwfn,
 						       p_hwfn->p_ooo_info,
 						       cid,
-						       iscsi_ooo->ooo_isle,
+						       ooo_opq->ooo_isle,
 						       p_buffer,
 						       QED_OOO_LEFT_BUF);
 				break;
@@ -743,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 				qed_ooo_add_new_buffer(p_hwfn,
 						       p_hwfn->p_ooo_info,
 						       cid,
-						       iscsi_ooo->ooo_isle +
-						       1,
+						       ooo_opq->ooo_isle + 1,
 						       p_buffer,
 						       QED_OOO_LEFT_BUF);
 				qed_ooo_join_isles(p_hwfn,
 						   p_hwfn->p_ooo_info,
-						   cid, iscsi_ooo->ooo_isle);
+						   cid, ooo_opq->ooo_isle);
 				break;
 			case TCP_EVENT_ADD_PEN:
 				num_ooo_add_to_peninsula++;
@@ -761,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 		} else {
 			DP_NOTICE(p_hwfn,
 				  "Unexpected event (%d) TX OOO completion\n",
-				  iscsi_ooo->ooo_opcode);
+				  ooo_opq->ooo_opcode);
 		}
 	}
 
@@ -847,6 +857,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
 	int rc;
 
+	if (!p_ll2_conn)
+		return 0;
+
 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
 		return 0;
 
@@ -870,13 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 	u16 new_idx = 0, num_bds = 0;
 	int rc;
 
-	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+	if (unlikely(!p_ll2_conn))
+		return 0;
+
+	if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
 		return 0;
 
 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
-	if (!num_bds)
+	if (unlikely(!num_bds))
 		return 0;
 
 	while (num_bds) {
@@ -885,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 		p_pkt = list_first_entry(&p_tx->active_descq,
 					 struct qed_ll2_tx_packet, list_entry);
-		if (!p_pkt)
+		if (unlikely(!p_pkt))
 			return -EINVAL;
 
-		if (p_pkt->bd_used != 1) {
+		if (unlikely(p_pkt->bd_used != 1)) {
 			DP_NOTICE(p_hwfn,
 				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
 				  p_pkt->bd_used);
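In qed_ll2_lb_txq_completion() above, the number of BDs to reap is computed as a 16-bit signed difference between the firmware consumer index and the driver's local index, which stays correct when the hardware index wraps past 0xffff. A minimal sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* 16-bit ring-index delta: casting both indices to signed 16-bit before
 * subtracting makes the result wrap-safe, exactly as in
 * num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
 */
static uint16_t bds_to_reap(uint16_t fw_cons, uint16_t local_idx)
{
	return (uint16_t)((int16_t)fw_cons - (int16_t)local_idx);
}

int main(void)
{
	printf("%u\n", bds_to_reap(110, 100));	/* 10, no wrap */
	printf("%u\n", bds_to_reap(5, 0xfffb));	/* 10, wrapped */
	return 0;
}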
@@ -986,7 +1002,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
 	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
-	    conn_type != QED_LL2_TYPE_IWARP) {
+	    conn_type != QED_LL2_TYPE_IWARP &&
+	    (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
 		p_ramrod->mf_si_bcast_accept_all = 1;
 		p_ramrod->mf_si_mcast_accept_all = 1;
 	} else {
@@ -1015,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
 		return 0;
 
-	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+	if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
 		p_ll2_conn->tx_stats_en = 0;
 	else
 		p_ll2_conn->tx_stats_en = 1;
@@ -1063,8 +1080,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	case QED_LL2_TYPE_FCOE:
 		p_ramrod->conn_type = PROTOCOLID_FCOE;
 		break;
-	case QED_LL2_TYPE_ISCSI:
-		p_ramrod->conn_type = PROTOCOLID_ISCSI;
+	case QED_LL2_TYPE_TCP_ULP:
+		p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
 		break;
 	case QED_LL2_TYPE_ROCE:
 		p_ramrod->conn_type = PROTOCOLID_ROCE;
@@ -1073,8 +1090,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 		p_ramrod->conn_type = PROTOCOLID_IWARP;
 		break;
 	case QED_LL2_TYPE_OOO:
-		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
-			p_ramrod->conn_type = PROTOCOLID_ISCSI;
+		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+		    p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
+			p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
 		else
 			p_ramrod->conn_type = PROTOCOLID_IWARP;
 		break;
@@ -1130,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	int rc = -EINVAL;
+
 	qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
 
 	/* Get SPQ entry */
@@ -1151,6 +1170,12 @@ static int
 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 			      struct qed_ll2_info *p_ll2_info)
 {
+	struct qed_chain_init_params params = {
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.num_elems	= p_ll2_info->input.rx_num_desc,
+	};
+	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_ll2_rx_packet *p_descq;
 	u32 capacity;
 	int rc = 0;
@@ -1158,13 +1183,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 	if (!p_ll2_info->input.rx_num_desc)
 		goto out;
 
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_NEXT_PTR,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     p_ll2_info->input.rx_num_desc,
-			     sizeof(struct core_rx_bd),
-			     &p_ll2_info->rx_queue.rxq_chain, NULL);
+	params.mode = QED_CHAIN_MODE_NEXT_PTR;
+	params.elem_size = sizeof(struct core_rx_bd);
+
+	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
 		goto out;
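From this point on, qed_chain_alloc() takes a qed_chain_init_params block instead of seven positional arguments: the fields shared by the RX BD chain and the RX completion chain are set once in the initializer, and only mode and elem_size vary per call. A generic sketch of the pattern; the struct and function here are stand-ins, not the qed API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A parameter block with designated initializers replaces a long
 * positional argument list; shared fields are set once, per-call fields
 * are overwritten before each allocation.
 */
struct chain_init_params {
	int mode;		/* e.g. linked-list vs. PBL */
	int intended_use;
	int cnt_type;
	uint32_t num_elems;
	size_t elem_size;
};

static int chain_alloc(const struct chain_init_params *p)
{
	printf("alloc: mode=%d elems=%u elem_size=%zu\n",
	       p->mode, (unsigned int)p->num_elems, p->elem_size);
	return 0;
}

int main(void)
{
	struct chain_init_params params = {
		.intended_use = 1,	/* shared by both chains */
		.cnt_type = 0,
		.num_elems = 4096,
	};

	params.mode = 0;		/* rxq: linked-list mode */
	params.elem_size = 8;
	chain_alloc(&params);

	params.mode = 1;		/* rcq: PBL mode, larger element */
	params.elem_size = 32;
	chain_alloc(&params);
	return 0;
}

Besides trimming call sites, the named fields make it impossible to swap two same-typed arguments silently, which is the classic failure mode of the old seven-argument form.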
chain\n"); goto out; @@ -1180,13 +1202,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, } p_ll2_info->rx_queue.descq_array = p_descq; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.rx_num_desc, - sizeof(struct core_rx_fast_path_cqe), - &p_ll2_info->rx_queue.rcq_chain, NULL); + params.mode = QED_CHAIN_MODE_PBL; + params.elem_size = sizeof(struct core_rx_fast_path_cqe); + + rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, ¶ms); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); goto out; @@ -1203,29 +1222,30 @@ out: static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.tx_num_desc, + .elem_size = sizeof(struct core_tx_bd), + }; struct qed_ll2_tx_packet *p_descq; - u32 desc_size; + size_t desc_size; u32 capacity; int rc = 0; if (!p_ll2_info->input.tx_num_desc) goto out; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.tx_num_desc, - sizeof(struct core_tx_bd), - &p_ll2_info->tx_queue.txq_chain, NULL); + rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain, + ¶ms); if (rc) goto out; capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); - /* First element is part of the packet, rest are flexibly added */ - desc_size = (sizeof(*p_descq) + - (p_ll2_info->input.tx_max_bds_per_packet - 1) * - sizeof(p_descq->bds_set)); + /* All bds_set elements are flexibily added. */ + desc_size = struct_size(p_descq, bds_set, + p_ll2_info->input.tx_max_bds_per_packet); p_descq = kcalloc(capacity, desc_size, GFP_KERNEL); if (!p_descq) { @@ -1538,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_info *p_ll2_conn; struct qed_hwfn *p_hwfn = cxt; @@ -1548,8 +1568,8 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) struct qed_ptt *p_ptt; int rc = -EINVAL; u32 i, capacity; - u32 desc_size; - u8 qid; + size_t desc_size; + u8 qid, stats_id; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) @@ -1582,10 +1602,9 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) INIT_LIST_HEAD(&p_tx->sending_descq); spin_lock_init(&p_tx->lock); capacity = qed_chain_get_capacity(&p_tx->txq_chain); - /* First element is part of the packet, rest are flexibly added */ - desc_size = (sizeof(*p_pkt) + - (p_ll2_conn->input.tx_max_bds_per_packet - 1) * - sizeof(p_pkt->bds_set)); + /* All bds_set elements are flexibily added. */ + desc_size = struct_size(p_pkt, bds_set, + p_ll2_conn->input.tx_max_bds_per_packet); for (i = 0; i < capacity; i++) { p_pkt = p_tx->descq_mem + desc_size * i; @@ -1616,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, p_ll2_conn->input.rx_conn_type); + stats_id = qed_ll2_handle_to_stats_id(p_hwfn, + p_ll2_conn->input.rx_conn_type, + qid); p_ll2_conn->queue_id = qid; - p_ll2_conn->tx_stats_id = qid; + p_ll2_conn->tx_stats_id = stats_id; - DP_VERBOSE(p_hwfn, QED_MSG_LL2, - "Establishing ll2 queue. 
@@ -1616,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
 
 	qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
 					 p_ll2_conn->input.rx_conn_type);
+	stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
+					      p_ll2_conn->input.rx_conn_type,
+					      qid);
 	p_ll2_conn->queue_id = qid;
-	p_ll2_conn->tx_stats_id = qid;
+	p_ll2_conn->tx_stats_id = stats_id;
 
-	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
-		   "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
-		   p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+	/* If there is no valid stats id for this connection, disable stats */
+	if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
+		p_ll2_conn->tx_stats_en = 0;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_LL2,
+			   "Disabling stats for queue %d - not enough counters\n",
+			   qid);
+	}
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_LL2,
+		   "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
+		   p_hwfn->rel_pf_id,
+		   p_ll2_conn->input.rx_conn_type, qid, stats_id);
 
 	if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
-		p_rx->set_prod_addr = p_hwfn->regview +
-		    GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+		p_rx->set_prod_addr =
+		    (u8 __iomem *)p_hwfn->regview +
+		    GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+				     TSTORM_LL2_RX_PRODS, qid);
 	} else {
 		/* QED_LL2_RX_TYPE_CTX - using doorbell */
 		p_rx->ctx_based = 1;
@@ -1660,7 +1695,8 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
 	if (rc)
 		goto out;
 
-	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
+	if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
+	    !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
 		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
 
 	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
@@ -1751,6 +1787,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
 	if (!p_ll2_conn)
 		return -EINVAL;
 	p_rx = &p_ll2_conn->rx_queue;
+	if (!p_rx->set_prod_addr)
+		return -EIO;
 
 	spin_lock_irqsave(&p_rx->lock, flags);
 	if (!list_empty(&p_rx->free_descq))
@@ -1765,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt,
 		}
 	}
 
-	/* If we're lacking entires, let's try to flush buffers to FW */
+	/* If we're lacking entries, let's try to flush buffers to FW */
 	if (!p_curp || !p_curb) {
 		rc = -EBUSY;
 		p_curp = NULL;
@@ -1824,6 +1862,7 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 	enum core_roce_flavor_type roce_flavor;
 	enum core_tx_dest tx_dest;
 	u16 bd_data = 0, frag_idx;
+	u16 bitfield1;
 
 	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE : CORE_RROCE;
CORE_ROCE : CORE_RROCE; @@ -1844,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, } start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - if (QED_IS_IWARP_PERSONALITY(p_hwfn) && - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { + if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) && + p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); } else { @@ -1855,9 +1894,11 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, pkt->remove_stag = true; } - SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, - cpu_to_le16(pkt->l4_hdr_offset_w)); - SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); + bitfield1 = le16_to_cpu(start_bd->bitfield1); + SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); + SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); + start_bd->bitfield1 = cpu_to_le16(bitfield1); + bd_data |= pkt->bd_flags; SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); @@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt, int rc = 0; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return -EINVAL; p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; - if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet) + if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); - if (p_tx->cur_send_packet) { + if (unlikely(p_tx->cur_send_packet)) { rc = -EEXIST; goto out; } /* Get entry, but only if we have tx elements for it */ - if (!list_empty(&p_tx->free_descq)) + if (unlikely(!list_empty(&p_tx->free_descq))) p_curp = list_first_entry(&p_tx->free_descq, struct qed_ll2_tx_packet, list_entry); - if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) + if (unlikely(p_curp && + qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)) p_curp = NULL; - if (!p_curp) { + if (unlikely(!p_curp)) { rc = -EBUSY; goto out; } @@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt, unsigned long flags; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return -EINVAL; - if (!p_ll2_conn->tx_queue.cur_send_packet) + if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet)) return -EINVAL; p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; - if (cur_send_frag_num >= p_cur_send_packet->bd_used) + if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used)) return -EINVAL; /* Fill the BD information, and possibly notify FW */ @@ -2331,7 +2373,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, cdev->ll2->cb_cookie = cookie; } -struct qed_ll2_cbs ll2_cbs = { +static struct qed_ll2_cbs ll2_cbs = { .rx_comp_cb = &qed_ll2b_complete_rx_packet, .rx_release_cb = &qed_ll2b_release_rx_packet, .tx_comp_cb = &qed_ll2b_complete_tx_packet, @@ -2399,7 +2441,8 @@ out: static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev) { return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) || - QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) && + QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) || + QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) && (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev)); } @@ -2425,11 +2468,13 @@ static int qed_ll2_stop(struct qed_dev *cdev) if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE) return 0; + if 
@@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt,
 	int rc = 0;
 
 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
-	if (!p_ll2_conn)
+	if (unlikely(!p_ll2_conn))
 		return -EINVAL;
 	p_tx = &p_ll2_conn->tx_queue;
 	p_tx_chain = &p_tx->txq_chain;
 
-	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
+	if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
 		return -EIO;
 
 	spin_lock_irqsave(&p_tx->lock, flags);
-	if (p_tx->cur_send_packet) {
+	if (unlikely(p_tx->cur_send_packet)) {
 		rc = -EEXIST;
 		goto out;
 	}
 
 	/* Get entry, but only if we have tx elements for it */
-	if (!list_empty(&p_tx->free_descq))
+	if (unlikely(!list_empty(&p_tx->free_descq)))
 		p_curp = list_first_entry(&p_tx->free_descq,
 					  struct qed_ll2_tx_packet, list_entry);
-	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
+	if (unlikely(p_curp &&
+		     qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
 		p_curp = NULL;
 
-	if (!p_curp) {
+	if (unlikely(!p_curp)) {
 		rc = -EBUSY;
 		goto out;
 	}
@@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt,
 	unsigned long flags;
 
 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
-	if (!p_ll2_conn)
+	if (unlikely(!p_ll2_conn))
 		return -EINVAL;
 
-	if (!p_ll2_conn->tx_queue.cur_send_packet)
+	if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
 		return -EINVAL;
 
 	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
 	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
 
-	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+	if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
 		return -EINVAL;
 
 	/* Fill the BD information, and possibly notify FW */
@@ -2331,7 +2373,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
 	cdev->ll2->cb_cookie = cookie;
 }
 
-struct qed_ll2_cbs ll2_cbs = {
+static struct qed_ll2_cbs ll2_cbs = {
 	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
 	.rx_release_cb = &qed_ll2b_release_rx_packet,
 	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
@@ -2399,7 +2441,8 @@ out:
 static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
 {
 	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
-		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+		QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
 		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
 }
 
@@ -2425,11 +2468,13 @@ static int qed_ll2_stop(struct qed_dev *cdev)
 	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
 		return 0;
 
+	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+		qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
 	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
 	eth_zero_addr(cdev->ll2_mac_address);
 
-	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
 		qed_ll2_stop_ooo(p_hwfn);
 
 	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
@@ -2465,7 +2510,8 @@ static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
 		conn_type = QED_LL2_TYPE_FCOE;
 		break;
 	case QED_PCI_ISCSI:
-		conn_type = QED_LL2_TYPE_ISCSI;
+	case QED_PCI_NVMETCP:
+		conn_type = QED_LL2_TYPE_TCP_ULP;
 		break;
 	case QED_PCI_ETH_ROCE:
 		conn_type = QED_LL2_TYPE_ROCE;
 		break;
@@ -2590,7 +2636,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 		}
 	}
 
-	if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
+	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
 		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
 		rc = qed_ll2_start_ooo(p_hwfn, params);
 		if (rc) {
@@ -2599,10 +2645,12 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 		}
 	}
 
-	rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
-	if (rc) {
-		DP_NOTICE(cdev, "Failed to add an LLH filter\n");
-		goto err3;
+	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
+		rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+		if (rc) {
+			DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+			goto err3;
+		}
 	}
 
 	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2610,7 +2658,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 	return 0;
 
 err3:
-	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
 		qed_ll2_stop_ooo(p_hwfn);
err2:
 	if (b_is_storage_eng1)
@@ -2644,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+	if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
 		       1 + nr_frags);
 		return -EINVAL;
@@ -2686,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	 */
 	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
 				       &pkt, 1);
-	if (rc)
+	if (unlikely(rc))
 		goto err;
 
 	for (i = 0; i < nr_frags; i++) {
@@ -2710,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 		/* if failed not much to do here, partial packet has been posted
 		 * we can't free memory, will need to wait for completion
 		 */
-		if (rc)
+		if (unlikely(rc))
 			goto err2;
 	}
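Most of the remaining hunks wrap fast-path error checks in unlikely() (and one expected branch in likely()). These expand to __builtin_expect() and only steer the compiler's branch layout; no behavior changes. A minimal illustration of the pattern, with made-up parameters:

#include <stdio.h>

/* likely()/unlikely() as the kernel defines them: hints for the compiler's
 * block placement, expanded via __builtin_expect(). The double negation
 * normalizes any truthy value to 0 or 1.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int send_one(void *conn, int num_bds, int max_bds)
{
	if (unlikely(!conn))		/* error paths: expected cold */
		return -1;
	if (unlikely(num_bds > max_bds))
		return -1;
	return 0;			/* expected hot path */
}

int main(void)
{
	printf("%d\n", send_one((void *)1, 2, 4));
	printf("%d\n", send_one(NULL, 2, 4));
	return 0;
}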