Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_dev.c')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_dev.c | 817
1 file changed, 405 insertions, 412 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 03bdd2e26329..d61cd32ec3b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,33 +1,7 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> @@ -51,6 +25,7 @@ #include "qed_dev_api.h" #include "qed_fcoe.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -63,6 +38,7 @@ #include "qed_sriov.h" #include "qed_vf.h" #include "qed_rdma.h" +#include "qed_nvmetcp.h" static DEFINE_SPINLOCK(qm_lock); @@ -436,7 +412,7 @@ static int qed_llh_alloc(struct qed_dev *cdev) continue; p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i; - DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n", + DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n", p_llh_info->num_ppfid, i); p_llh_info->num_ppfid++; } @@ -650,7 +626,7 @@ static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid) if (ppfid >= p_llh_info->num_ppfid) { DP_NOTICE(cdev, - "ppfid %d is not valid, available indices are 0..%hhd\n", + "ppfid %d is not valid, available indices are 0..%d\n", ppfid, p_llh_info->num_ppfid - 1); *p_abs_ppfid = 0; return -EINVAL; @@ -693,7 +669,8 @@ qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } /* Storage PF is bound to a single engine while L2 PF uses both */ - if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn)) + if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) || + QED_IS_NVMETCP_PERSONALITY(p_hwfn)) eng = cdev->fir_affin ? 
QED_ENG1 : QED_ENG0; else /* L2_PERSONALITY */ eng = QED_BOTH_ENG; @@ -975,12 +952,12 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn, } int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]) + u8 ppfid, const u8 mac_addr[ETH_ALEN]) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); union qed_llh_filter filter = {}; - u8 filter_idx, abs_ppfid; + u8 filter_idx, abs_ppfid = 0; u32 high, low, ref_cnt; int rc = 0; @@ -1190,6 +1167,9 @@ void qed_llh_remove_mac_filter(struct qed_dev *cdev, if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) goto out; + if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + return; + ether_addr_copy(filter.mac.addr, mac_addr); rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx, &ref_cnt); @@ -1368,6 +1348,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) void qed_resc_free(struct qed_dev *cdev) { + struct qed_rdma_info *rdma_info; + struct qed_hwfn *p_hwfn; int i; if (IS_VF(cdev)) { @@ -1385,7 +1367,8 @@ void qed_resc_free(struct qed_dev *cdev) qed_llh_free(cdev); for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = cdev->hwfns + i; + rdma_info = p_hwfn->p_rdma_info; qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); @@ -1404,15 +1387,23 @@ void qed_resc_free(struct qed_dev *cdev) qed_ooo_free(p_hwfn); } - if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + qed_nvmetcp_free(p_hwfn); + qed_ooo_free(p_hwfn); + } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); qed_rdma_info_free(p_hwfn); + } + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); - qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); @@ -1444,6 +1435,7 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) flags |= PQ_FLAGS_OFLD; break; case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; break; case QED_PCI_ETH_ROCE: @@ -1493,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); /* num RLs can't exceed resource amount of rls or vports */ - num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), - RESC_NUM(p_hwfn, QED_VPORT)); + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); /* Make sure after we reserve there's something left */ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) @@ -1542,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) bool four_port; /* pq and vport bases for this PF */ - qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ qm_info->vport_rl_en = true; @@ -1638,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) */ /* flags for pq init */ -#define PQ_INIT_SHARE_VPORT (1 << 0) -#define PQ_INIT_PF_RL (1 << 1) -#define PQ_INIT_VF_RL (1 << 2) +#define PQ_INIT_SHARE_VPORT BIT(0) +#define PQ_INIT_PF_RL BIT(1) +#define PQ_INIT_VF_RL BIT(2) /* 
defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 @@ -1972,7 +1964,7 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) return 0; if (QED_IS_ROCE_PERSONALITY(p_hwfn)) { - p_hwfn->hw_info.multi_tc_roce_en = 0; + p_hwfn->hw_info.multi_tc_roce_en = false; DP_NOTICE(p_hwfn, "multi-tc roce was disabled to reduce requested amount of pqs\n"); if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) @@ -2269,6 +2261,7 @@ int qed_resc_alloc(struct qed_dev *cdev) /* EQ */ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn); enum protocol_type rdma_proto; if (QED_IS_ROCE_PERSONALITY(p_hwfn)) @@ -2279,11 +2272,15 @@ int qed_resc_alloc(struct qed_dev *cdev) num_cons = qed_cxt_get_proto_cid_count(p_hwfn, rdma_proto, NULL) * 2; - n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; - } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + /* EQ should be able to get events from all SRQ's + * at the same time + */ + n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq; + } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || + p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { num_cons = qed_cxt_get_proto_cid_count(p_hwfn, - PROTOCOLID_ISCSI, + PROTOCOLID_TCP_ULP, NULL); n_eqes += 2 * num_cons; } @@ -2295,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_no_mem; } - rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); + rc = qed_eq_alloc(p_hwfn, (u16)n_eqes); if (rc) goto alloc_err; @@ -2330,6 +2327,15 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; } + if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + rc = qed_nvmetcp_alloc(p_hwfn); + if (rc) + goto alloc_err; + rc = qed_ooo_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { rc = qed_rdma_info_alloc(p_hwfn); if (rc) @@ -2371,6 +2377,49 @@ alloc_err: return rc; } +static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + if (fw_return_code != COMMON_ERR_CODE_ERROR) + goto eqe_unexpected; + + if (data->err_data.recovery_scope == ERR_SCOPE_FUNC && + le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); + return 0; + } + +eqe_unexpected: + DP_ERR(p_hwfn, + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", + opcode, fw_return_code, echo); + return -EINVAL; +} + +static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + case COMMON_EVENT_VF_FLR: + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, + fw_return_code); + case COMMON_EVENT_FW_ERROR: + return qed_fw_err_handler(p_hwfn, opcode, + le16_to_cpu(echo), data, + fw_return_code); + default: + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", + opcode, echo); + return -EINVAL; + } +} + void qed_resc_setup(struct qed_dev *cdev) { int i; @@ -2399,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev) qed_l2_setup(p_hwfn); qed_iov_setup(p_hwfn); + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_common_eqe_event); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); @@ -2410,6 +2461,11 @@ void qed_resc_setup(struct qed_dev *cdev) qed_iscsi_setup(p_hwfn); qed_ooo_setup(p_hwfn); } + + if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + qed_nvmetcp_setup(p_hwfn); + qed_ooo_setup(p_hwfn); + } } } @@ -2421,9 +2477,8 @@ int 
qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id); if (is_vf) id += 0x10; @@ -2583,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, cache_line_size); } - if (L1_CACHE_BYTES > wr_mbs) + if (wr_mbs < L1_CACHE_BYTES) DP_INFO(p_hwfn, "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", L1_CACHE_BYTES, wr_mbs); @@ -2599,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; - struct qed_qm_common_rt_init_params params; + struct qed_qm_common_rt_init_params *params; struct qed_dev *cdev = p_hwfn->cdev; u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + DP_NOTICE(p_hwfn->cdev, + "Failed to allocate common init params\n"); + + return -ENOMEM; + } + qed_init_cau_rt_data(cdev); /* Program GTT windows */ @@ -2618,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qm_info->pf_wfq_en = true; } - memset(¶ms, 0, sizeof(params)); - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; - params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.pf_rl_en = qm_info->pf_rl_en; - params.pf_wfq_en = qm_info->pf_wfq_en; - params.global_rl_en = qm_info->vport_rl_en; - params.vport_wfq_en = qm_info->vport_wfq_en; - params.port_params = qm_info->qm_port_params; + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params->pf_rl_en = qm_info->pf_rl_en; + params->pf_wfq_en = qm_info->pf_wfq_en; + params->global_rl_en = qm_info->vport_rl_en; + params->vport_wfq_en = qm_info->vport_wfq_en; + params->port_params = qm_info->qm_port_params; - qed_qm_common_rt_init(p_hwfn, ¶ms); + qed_qm_common_rt_init(p_hwfn, params); qed_cxt_hw_init_common(p_hwfn); @@ -2635,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) - return rc; + goto out; qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); @@ -2654,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, max_num_vfs = QED_IS_AH(cdev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); - qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); @@ -2663,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); +out: + kfree(params); + return rc; } @@ -2775,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_bar(p_hwfn, p_ptt); } - p_hwfn->wid_count = (u16) n_cpus; + p_hwfn->wid_count = (u16)n_cpus; DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", @@ -2871,7 +2936,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Protocol Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, - (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); + ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) || + (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); @@ -3032,6 +3098,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) continue; } + /* Some flows may keep variable set */ + p_hwfn->mcp_info->mcp_handling_status = 0; + rc = qed_calc_hw_mode(p_hwfn); if (rc) return rc; @@ -3085,13 +3154,15 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->rel_pf_id, false); if (rc) { - DP_NOTICE(p_hwfn, "Final cleanup failed\n"); + qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt, + QED_HW_ERR_RAMROD_FAIL, + "Final cleanup failed\n"); goto load_err; } } /* Log and clear previous pglue_b errors if such exist */ - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -3124,14 +3195,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, @@ -3491,8 +3562,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, - PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -3550,14 +3621,21 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); + + if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n", 
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n", (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), + (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ), (int)sb_cnt.cnt); } @@ -3651,12 +3729,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) return qed_hsi_def_val[type][chip_id]; } + static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resc_max_val, mcp_resp; u8 res_id; int rc; + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { case QED_LL2_RAM_QUEUE: @@ -3749,7 +3829,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, break; case QED_BDQ: if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && - p_hwfn->hw_info.personality != QED_PCI_FCOE) + p_hwfn->hw_info.personality != QED_PCI_FCOE && + p_hwfn->hw_info.personality != QED_PCI_NVMETCP) *p_resc_num = 0; else *p_resc_num = 1; @@ -3770,7 +3851,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, *p_resc_start = 0; else if (p_hwfn->cdev->num_ports_in_engine == 4) *p_resc_start = p_hwfn->port_id; - else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) + else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || + p_hwfn->hw_info.personality == QED_PCI_NVMETCP) *p_resc_start = p_hwfn->port_id; else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) *p_resc_start = p_hwfn->port_id + 2; @@ -3900,7 +3982,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) * resources allocation queries should be atomic. Since several PFs can * run in parallel - a resource lock is needed. * If either the resource lock or resource set value commands are not - * supported - skip the the max values setting, release the lock if + * supported - skip the max values setting, release the lock if * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. 
*/ @@ -3913,7 +3995,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } else if (rc == -EINVAL) { DP_INFO(p_hwfn, "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); - } else if (!rc && !resc_lock_params.b_granted) { + } else if (!resc_lock_params.b_granted) { DP_NOTICE(p_hwfn, "Failed to acquire the resource lock for the resource allocation commands\n"); return -EBUSY; @@ -3983,10 +4065,12 @@ unlock_and_exit: static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_speed_params *ext_speed; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; + int i; /* Read global nvm_cfg address */ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); @@ -4009,37 +4093,21 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4: break; default: DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); @@ -4057,8 +4125,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; link->speed.advertised_speeds = link_temp; - link_temp = link->speed.advertised_speeds; - p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; + p_caps->speed_capabilities = link->speed.advertised_speeds; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + @@ -4093,19 +4160,40 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); } - p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = - link->speed.autoneg; + p_caps->default_speed_autoneg = link->speed.autoneg; - link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; - link_temp >>= 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; - link->pause.autoneg = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); - link->pause.forced_rx = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); - link->pause.forced_tx = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); + link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_FEC_FORCE_MODE)) { + case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE: + p_caps->fec_default |= QED_FEC_MODE_NONE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE: + p_caps->fec_default |= QED_FEC_MODE_FIRECODE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_RS: + p_caps->fec_default |= QED_FEC_MODE_RS; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO: + p_caps->fec_default |= QED_FEC_MODE_AUTO; + break; + default: + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "unknown FEC mode in 0x%08x\n", link_temp); + } + } else { + p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED; + } + + link->fec = p_caps->fec_default; + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, ext_phy)); @@ -4137,14 +4225,97 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; } - DP_VERBOSE(p_hwfn, - NETIF_MSG_LINK, - "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", - link->speed.forced_speed, - link->speed.advertised_speeds, - link->speed.autoneg, - link->pause.autoneg, - p_caps->default_eee, p_caps->eee_lpi_timer); + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = &link->ext_speed; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_speed)); + + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED); + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN) + ext_speed->autoneg = true; + + ext_speed->forced_speed = 0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G) + ext_speed->forced_speed |= QED_EXT_SPEED_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G) + ext_speed->forced_speed |= QED_EXT_SPEED_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G) + ext_speed->forced_speed |= QED_EXT_SPEED_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G) + ext_speed->forced_speed |= QED_EXT_SPEED_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G) + ext_speed->forced_speed |= QED_EXT_SPEED_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4; + + fld = GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_EXTENDED_SPEED_CAP); + + ext_speed->advertised_speeds = 0; + if (fld 
& NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_P4; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_fec_mode)); + link->ext_fec_mode = link_temp; + + p_caps->default_ext_speed_caps = ext_speed->advertised_speeds; + p_caps->default_ext_speed = ext_speed->forced_speed; + p_caps->default_ext_autoneg = ext_speed->autoneg; + p_caps->default_ext_fec = link->ext_fec_mode; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n", + ext_speed->forced_speed, + ext_speed->advertised_speeds, ext_speed->autoneg, + p_caps->default_ext_fec); + } + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer, + p_caps->fec_default); if (IS_LEAD_HWFN(p_hwfn)) { struct qed_dev *cdev = p_hwfn->cdev; @@ -4180,7 +4351,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST) | - BIT(QED_MF_INTER_PF_SWITCH); + BIT(QED_MF_INTER_PF_SWITCH) | + BIT(QED_MF_DISABLE_ARFS); break; case NVM_CFG1_GLOB_MF_MODE_DEFAULT: cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | @@ -4193,6 +4365,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", cdev->mf_bits); + + /* In CMT the PF is unknown when the GFS block processes the + * packet. Therefore cannot use searcher as it has a per PF + * database, and thus ARFS must be disabled. 
+ * + */ + if (QED_IS_CMT(cdev)) + cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS); } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", @@ -4217,6 +4397,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) __set_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities); + /* Read device serial number information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, serial_number); + + for (i = 0; i < 4; i++) + p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4); + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } @@ -4392,7 +4580,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, } if (QED_IS_ROCE_PERSONALITY(p_hwfn)) - p_hwfn->hw_info.multi_tc_roce_en = 1; + p_hwfn->hw_info.multi_tc_roce_en = true; p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; p_hwfn->hw_info.num_active_tc = 1; @@ -4461,12 +4649,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) -{ - kfree(p_hwfn->nvm_info.image_att); - p_hwfn->nvm_info.image_att = NULL; -} - static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -4551,7 +4733,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); @@ -4612,7 +4794,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -4646,286 +4828,7 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); - qed_nvm_info_free(p_hwfn); -} - -static void qed_chain_free_next_ptr(struct qed_dev *cdev, - struct qed_chain *p_chain) -{ - void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; - dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; - struct qed_chain_next *p_next; - u32 size, i; - - if (!p_virt) - return; - - size = p_chain->elem_size * p_chain->usable_per_page; - - for (i = 0; i < p_chain->page_cnt; i++) { - if (!p_virt) - break; - - p_next = (struct qed_chain_next *)((u8 *)p_virt + size); - p_virt_next = p_next->next_virt; - p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); - - dma_free_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, p_virt, p_phys); - - p_virt = p_virt_next; - p_phys = p_phys_next; - } -} - -static void qed_chain_free_single(struct qed_dev *cdev, - struct qed_chain *p_chain) -{ - if (!p_chain->p_virt_addr) - return; - - dma_free_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - p_chain->p_virt_addr, p_chain->p_phys_addr); -} - -static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; - u32 page_cnt = p_chain->page_cnt, i, pbl_size; - u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; - - if (!pp_virt_addr_tbl) - return; - - if (!p_pbl_virt) - goto out; - - for (i = 0; i < page_cnt; i++) { - if (!pp_virt_addr_tbl[i]) - break; - - dma_free_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - pp_virt_addr_tbl[i], - *(dma_addr_t *)p_pbl_virt); - - p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; - } - - pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - - if (!p_chain->b_external_pbl) - dma_free_coherent(&cdev->pdev->dev, - pbl_size, - p_chain->pbl_sp.p_virt_table, - 
p_chain->pbl_sp.p_phys_table); -out: - vfree(p_chain->pbl.pp_virt_addr_tbl); - p_chain->pbl.pp_virt_addr_tbl = NULL; -} - -void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - switch (p_chain->mode) { - case QED_CHAIN_MODE_NEXT_PTR: - qed_chain_free_next_ptr(cdev, p_chain); - break; - case QED_CHAIN_MODE_SINGLE: - qed_chain_free_single(cdev, p_chain); - break; - case QED_CHAIN_MODE_PBL: - qed_chain_free_pbl(cdev, p_chain); - break; - } -} - -static int -qed_chain_alloc_sanity_check(struct qed_dev *cdev, - enum qed_chain_cnt_type cnt_type, - size_t elem_size, u32 page_cnt) -{ - u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; - - /* The actual chain size can be larger than the maximal possible value - * after rounding up the requested elements number to pages, and after - * taking into acount the unusuable elements (next-ptr elements). - * The size of a "u16" chain can be (U16_MAX + 1) since the chain - * size/capacity fields are of a u32 type. - */ - if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && - chain_size > ((u32)U16_MAX + 1)) || - (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { - DP_NOTICE(cdev, - "The actual chain size (0x%llx) is larger than the maximal possible value\n", - chain_size); - return -EINVAL; - } - - return 0; -} - -static int -qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - void *p_virt = NULL, *p_virt_prev = NULL; - dma_addr_t p_phys = 0; - u32 i; - - for (i = 0; i < p_chain->page_cnt; i++) { - p_virt = dma_alloc_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - &p_phys, GFP_KERNEL); - if (!p_virt) - return -ENOMEM; - - if (i == 0) { - qed_chain_init_mem(p_chain, p_virt, p_phys); - qed_chain_reset(p_chain); - } else { - qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, - p_virt, p_phys); - } - - p_virt_prev = p_virt; - } - /* Last page's next element should point to the beginning of the - * chain. - */ - qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, - p_chain->p_virt_addr, - p_chain->p_phys_addr); - - return 0; -} - -static int -qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - dma_addr_t p_phys = 0; - void *p_virt = NULL; - - p_virt = dma_alloc_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); - if (!p_virt) - return -ENOMEM; - - qed_chain_init_mem(p_chain, p_virt, p_phys); - qed_chain_reset(p_chain); - - return 0; -} - -static int -qed_chain_alloc_pbl(struct qed_dev *cdev, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl) -{ - u32 page_cnt = p_chain->page_cnt, size, i; - dma_addr_t p_phys = 0, p_pbl_phys = 0; - void **pp_virt_addr_tbl = NULL; - u8 *p_pbl_virt = NULL; - void *p_virt = NULL; - - size = page_cnt * sizeof(*pp_virt_addr_tbl); - pp_virt_addr_tbl = vzalloc(size); - if (!pp_virt_addr_tbl) - return -ENOMEM; - - /* The allocation of the PBL table is done with its full size, since it - * is expected to be successive. - * qed_chain_init_pbl_mem() is called even in a case of an allocation - * failure, since pp_virt_addr_tbl was previously allocated, and it - * should be saved to allow its freeing during the error flow. 
- */ - size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - - if (!ext_pbl) { - p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_pbl_phys, GFP_KERNEL); - } else { - p_pbl_virt = ext_pbl->p_pbl_virt; - p_pbl_phys = ext_pbl->p_pbl_phys; - p_chain->b_external_pbl = true; - } - - qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, - pp_virt_addr_tbl); - if (!p_pbl_virt) - return -ENOMEM; - - for (i = 0; i < page_cnt; i++) { - p_virt = dma_alloc_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - &p_phys, GFP_KERNEL); - if (!p_virt) - return -ENOMEM; - - if (i == 0) { - qed_chain_init_mem(p_chain, p_virt, p_phys); - qed_chain_reset(p_chain); - } - - /* Fill the PBL table with the physical address of the page */ - *(dma_addr_t *)p_pbl_virt = p_phys; - /* Keep the virtual address of the page */ - p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; - - p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; - } - - return 0; -} - -int qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl) -{ - u32 page_cnt; - int rc = 0; - - if (mode == QED_CHAIN_MODE_SINGLE) - page_cnt = 1; - else - page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); - - rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); - if (rc) { - DP_NOTICE(cdev, - "Cannot allocate a chain with the given arguments:\n"); - DP_NOTICE(cdev, - "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", - intended_use, mode, cnt_type, num_elems, elem_size); - return rc; - } - - qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, - mode, cnt_type); - - switch (mode) { - case QED_CHAIN_MODE_NEXT_PTR: - rc = qed_chain_alloc_next_ptr(cdev, p_chain); - break; - case QED_CHAIN_MODE_SINGLE: - rc = qed_chain_alloc_single(cdev, p_chain); - break; - case QED_CHAIN_MODE_PBL: - rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl); - break; - } - if (rc) - goto nomem; - - return 0; - -nomem: - qed_chain_free(cdev, p_chain); - return rc; + qed_mcp_nvm_info_free(p_hwfn); } int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) @@ -4933,7 +4836,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", @@ -5067,7 +4970,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); @@ -5106,7 +5009,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_XSDM_RAM + - XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); @@ -5520,3 +5423,93 @@ void qed_set_fw_mac_addr(__le16 *fw_msb, ((u8 *)fw_lsb)[0] = mac[5]; ((u8 *)fw_lsb)[1] = mac[4]; } + +static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + struct qed_llh_filter_info 
*p_filters; + int rc; + + rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all"); + if (rc) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE * + sizeof(*p_filters)); + + return 0; +} + +static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid; + int rc = 0; + + if (!p_ptt) + return; + + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) && + !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) + goto out; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto out; + + rc = qed_llh_shadow_remove_all_filters(cdev, ppfid); + if (rc) + goto out; + + for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; + filter_idx++) { + rc = qed_llh_remove_filter(p_hwfn, p_ptt, + abs_ppfid, filter_idx); + if (rc) + goto out; + } +out: + qed_ptt_release(p_hwfn, p_ptt); +} + +int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port) +{ + return qed_llh_add_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_SRC_PORT, + src_port, QED_LLH_DONT_CARE); +} + +void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port) +{ + qed_llh_remove_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_SRC_PORT, + src_port, QED_LLH_DONT_CARE); +} + +int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port) +{ + return qed_llh_add_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_DONT_CARE, dest_port); +} + +void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port) +{ + qed_llh_remove_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_DONT_CARE, dest_port); +} + +void qed_llh_clear_all_filters(struct qed_dev *cdev) +{ + u8 ppfid; + + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) && + !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) + return; + + for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) + qed_llh_clear_ppfid_filters(cdev, ppfid); +} |
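
One of the mechanical changes in this patch is that qed_hw_init_common() stops keeping struct qed_qm_common_rt_init_params on the stack and allocates it with kzalloc() instead, routing the existing early return through an "out" label so the buffer is freed on every path. The condensed sketch below shows the resulting shape; the function name, the elided field setup and the elided per-VF configuration are illustrative only, not part of the patch.

	static int qed_hw_init_common_sketch(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt, int hw_mode)
	{
		struct qed_qm_common_rt_init_params *params;
		int rc;

		/* Parameter block moved off the stack to the heap */
		params = kzalloc(sizeof(*params), GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		/* ... populate *params from p_hwfn->qm_info ... */
		qed_qm_common_rt_init(p_hwfn, params);

		rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
		if (rc)
			goto out;	/* was "return rc" before this patch */

		/* ... per-VF CCFC/TCFC configuration ... */
	out:
		kfree(params);
		return rc;
	}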
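
The new helpers exported at the tail of the file (qed_llh_add_src_tcp_port_filter(), qed_llh_add_dst_tcp_port_filter() and their remove/clear counterparts) give protocol drivers a one-call way to steer traffic for a given TCP port through the lead hwfn's LLH filters. A minimal usage sketch, assuming a hypothetical offload caller and ignoring locking and connection state, might look like this:

	/* Hypothetical caller: steer an offloaded TCP service port to this
	 * function using the LLH helpers introduced by this patch.
	 */
	static int example_offload_start(struct qed_dev *cdev, u16 svc_port)
	{
		/* Classify packets whose TCP destination port matches svc_port */
		return qed_llh_add_dst_tcp_port_filter(cdev, svc_port);
	}

	static void example_offload_stop(struct qed_dev *cdev, u16 svc_port)
	{
		qed_llh_remove_dst_tcp_port_filter(cdev, svc_port);

		/* Alternatively, drop every LLH filter configured on the device */
		qed_llh_clear_all_filters(cdev);
	}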