Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
24 files changed, 1364 insertions, 330 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d9a03aba0e02..127c89b22ef0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -53,7 +53,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 33 +#define QED_MINOR_VERSION 37 #define QED_REVISION_VERSION 0 #define QED_ENGINEERING_VERSION 20 @@ -296,6 +296,12 @@ enum qed_wol_support { QED_WOL_SUPPORT_PME, }; +enum qed_db_rec_exec { + DB_REC_DRY_RUN, + DB_REC_REAL_DEAL, + DB_REC_ONCE, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -425,6 +431,18 @@ struct qed_qm_info { u8 num_pf_rls; }; +#define QED_OVERFLOW_BIT 1 + +struct qed_db_recovery_info { + struct list_head list; + + /* Lock to protect the doorbell recovery mechanism list */ + spinlock_t lock; + bool dorq_attn; + u32 db_recovery_counter; + unsigned long overflow; +}; + struct storm_stats { u32 address; u32 len; @@ -522,6 +540,7 @@ struct qed_simd_fp_handler { enum qed_slowpath_wq_flag { QED_SLOWPATH_MFW_TLV_REQ, + QED_SLOWPATH_PERIODIC_DB_REC, }; struct qed_hwfn { @@ -539,7 +558,6 @@ struct qed_hwfn { u8 dp_level; char name[NAME_SIZE]; - bool first_on_engine; bool hw_init_done; u8 num_funcs_on_engine; @@ -640,6 +658,9 @@ struct qed_hwfn { /* L2-related */ struct qed_l2_info *p_l2_info; + /* Mechanism for recovering from doorbell drop */ + struct qed_db_recovery_info db_recovery_info; + /* Nvm images number and attributes */ struct qed_nvm_image_info nvm_info; @@ -652,11 +673,12 @@ struct qed_hwfn { struct delayed_work iov_task; unsigned long iov_task_flags; #endif - - struct z_stream_s *stream; + struct z_stream_s *stream; + bool slowpath_wq_active; struct workqueue_struct *slowpath_wq; struct delayed_work slowpath_task; unsigned long slowpath_task_flags; + u32 periodic_db_rec_count; }; struct pci_params { @@ -735,6 +757,7 @@ struct qed_dev { #define CHIP_BOND_ID_SHIFT 0 u8 num_engines; + u8 num_ports; u8 num_ports_in_engine; u8 num_funcs_in_port; @@ -786,6 +809,9 @@ struct qed_dev { u32 mcp_nvm_resp; + /* Recovery */ + bool recov_in_prog; + /* Linux specific here */ struct qede_dev *edev; struct pci_dev *pdev; @@ -871,7 +897,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_device_num_engines(struct qed_dev *cdev); -int qed_device_get_port_id(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); @@ -897,6 +922,11 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) +/* doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); + /* Other Linux specific common definitions */ #define DP_NAME(cdev) ((cdev)->name) @@ -912,6 +942,10 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); writel((u32)val, (void __iomem *)((u8 __iomem *)\ (cdev->doorbells) + (db_addr))) +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + qed_device_num_ports((_p_hwfn)->cdev)) +int qed_device_num_ports(struct qed_dev *cdev); + /* Prototypes */ int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info); @@ -919,6 +953,7 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 
*input_buf, u32 max_size, u8 *unzip_buf); +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn); void qed_get_protocol_stats(struct qed_dev *cdev, enum qed_mcp_protocol_type type, union qed_mcp_protocol_stats *stats); @@ -931,4 +966,6 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, union qed_mfw_tlv_data *tlv_data); void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc); + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index dc1c1b616084..e61d1d905415 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) u32 size = min_t(u32, total_size, psz); void **p_virt = &p_mngr->t2[i].p_virt; - *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, - size, &p_mngr->t2[i].p_phys, - GFP_KERNEL); + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_mngr->t2[i].p_phys, + GFP_KERNEL); if (!p_mngr->t2[i].p_virt) { rc = -ENOMEM; goto t2_fail; @@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 size; size = min_t(u32, sz_left, p_blk->real_size_in_page); - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, - &p_phys, GFP_KERNEL); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; @@ -2129,17 +2129,18 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) rdma_tasks); /* no need for break since RoCE coexist with Ethernet */ } + /* fall through */ case QED_PCI_ETH: { struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; - if (!p_params->num_vf_cons) - p_params->num_vf_cons = - ETH_PF_PARAMS_VF_CONS_DEFAULT; - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, - p_params->num_vf_cons); + if (!p_params->num_vf_cons) + p_params->num_vf_cons = + ETH_PF_PARAMS_VF_CONS_DEFAULT; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } @@ -2306,9 +2307,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, goto out0; } - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, - p_blk->real_size_in_page, &p_phys, - GFP_KERNEL); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, &p_phys, + GFP_KERNEL); if (!p_virt) { rc = -ENOMEM; goto out1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 88a8576ca9ce..866cdc86a3f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -66,6 +66,301 @@ static DEFINE_SPINLOCK(qm_lock); +/******************** Doorbell Recovery *******************/ +/* The doorbell recovery mechanism consists of a list of entries which represent + * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each + * entity needs to register with the mechanism and provide the parameters + * describing it's doorbell, including a location where last used doorbell data + * can be found. The doorbell execute function will traverse the list and + * doorbell all of the registered entries. 
+ */ +struct qed_db_recovery_entry { + struct list_head list_entry; + void __iomem *db_addr; + void *db_data; + enum qed_db_rec_width db_width; + enum qed_db_rec_space db_space; + u8 hwfn_idx; +}; + +/* Display a single doorbell recovery entry */ +static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry, + char *action) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", + action, + db_entry, + db_entry->db_addr, + db_entry->db_data, + db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", + db_entry->db_space == DB_REC_USER ? "user" : "kernel", + db_entry->hwfn_idx); +} + +/* Doorbell address sanity (address within doorbell bar range) */ +static bool qed_db_rec_sanity(struct qed_dev *cdev, + void __iomem *db_addr, + enum qed_db_rec_width db_width, + void *db_data) +{ + u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64; + + /* Make sure doorbell address is within the doorbell bar */ + if (db_addr < cdev->doorbells || + (u8 __iomem *)db_addr + width > + (u8 __iomem *)cdev->doorbells + cdev->db_size) { + WARN(true, + "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", + db_addr, + cdev->doorbells, + (u8 __iomem *)cdev->doorbells + cdev->db_size); + return false; + } + + /* ake sure doorbell data pointer is not null */ + if (!db_data) { + WARN(true, "Illegal doorbell data pointer: %p", db_data); + return false; + } + + return true; +} + +/* Find hwfn according to the doorbell address */ +static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev, + void __iomem *db_addr) +{ + struct qed_hwfn *p_hwfn; + + /* In CMT doorbell bar is split down the middle between engine 0 and enigne 1 */ + if (cdev->num_hwfns > 1) + p_hwfn = db_addr < cdev->hwfns[1].doorbells ? 
+ &cdev->hwfns[0] : &cdev->hwfns[1]; + else + p_hwfn = QED_LEADING_HWFN(cdev); + + return p_hwfn; +} + +/* Add a new entry to the doorbell recovery mechanism */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space) +{ + struct qed_db_recovery_entry *db_entry; + struct qed_hwfn *p_hwfn; + + /* Shortcircuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Sanitize doorbell address */ + if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) + return -EINVAL; + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Create entry */ + db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL); + if (!db_entry) { + DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n"); + return -ENOMEM; + } + + /* Populate entry */ + db_entry->db_addr = db_addr; + db_entry->db_data = db_data; + db_entry->db_width = db_width; + db_entry->db_space = db_space; + db_entry->hwfn_idx = p_hwfn->my_id; + + /* Display */ + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list); + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + return 0; +} + +/* Remove an entry from the doorbell recovery mechanism */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data) +{ + struct qed_db_recovery_entry *db_entry = NULL; + struct qed_hwfn *p_hwfn; + int rc = -EINVAL; + + /* Shortcircuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + /* search according to db_data addr since db_addr is not unique (roce) */ + if (db_entry->db_data == db_data) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); + list_del(&db_entry->list_entry); + rc = 0; + break; + } + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + if (rc == -EINVAL) + + DP_NOTICE(p_hwfn, + "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n", + db_data, db_addr); + else + kfree(db_entry); + + return rc; +} + +/* Initialize the doorbell recovery mechanism */ +static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n"); + + /* Make sure db_size was set in cdev */ + if (!p_hwfn->cdev->db_size) { + DP_ERR(p_hwfn->cdev, "db_size not set\n"); + return -EINVAL; + } + + INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list); + spin_lock_init(&p_hwfn->db_recovery_info.lock); + p_hwfn->db_recovery_info.db_recovery_counter = 0; + + return 0; +} + +/* Destroy the doorbell recovery mechanism */ +static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n"); + if (!list_empty(&p_hwfn->db_recovery_info.list)) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). 
Prepare to purge doorbell recovery list...\n"); + while (!list_empty(&p_hwfn->db_recovery_info.list)) { + db_entry = + list_first_entry(&p_hwfn->db_recovery_info.list, + struct qed_db_recovery_entry, + list_entry); + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); + list_del(&db_entry->list_entry); + kfree(db_entry); + } + } + p_hwfn->db_recovery_info.db_recovery_counter = 0; +} + +/* Print the content of the doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_NOTICE(p_hwfn, + "Displaying doorbell recovery database. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/* Ring the doorbell of a single doorbell recovery entry */ +static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry) +{ + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %x\n", + db_entry->db_addr, + *(u32 *)db_entry->db_data); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %llx\n", + db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + + /* Sanity */ + if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, + db_entry->db_width, db_entry->db_data)) + return; + + /* Flush the write combined buffer. Since there are multiple doorbelling + * entities using the same address, if we don't flush, a transaction + * could be lost. + */ + wmb(); + + /* Ring the doorbell */ + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + + /* Flush the write combined buffer. Next doorbell may come from a + * different entity to the same address... + */ + wmb(); +} + +/* Traverse the doorbell recovery entry list and ring all the doorbells */ +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_NOTICE(p_hwfn, "Executing doorbell recovery. 
Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Track amount of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) + qed_db_recovery_ring(p_hwfn, db_entry); + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/******************** Doorbell Recovery end ****************/ + #define QED_MIN_DPIS (4) #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) @@ -194,6 +489,9 @@ void qed_resc_free(struct qed_dev *cdev) qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); + + /* Destroy doorbell recovery mechanism */ + qed_db_recovery_teardown(p_hwfn); } } @@ -480,19 +778,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, /* get pq index according to PQ_FLAGS */ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, - u32 pq_flags) + unsigned long pq_flags) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; /* Can't have multiple flags set here */ - if (bitmap_weight((unsigned long *)&pq_flags, + if (bitmap_weight(&pq_flags, sizeof(pq_flags) * BITS_PER_BYTE) > 1) { - DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); goto err; } if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { - DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); goto err; } @@ -969,6 +1267,11 @@ int qed_resc_alloc(struct qed_dev *cdev) struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u32 n_eqes, num_cons; + /* Initialize the doorbell recovery mechanism */ + rc = qed_db_recovery_setup(p_hwfn); + if (rc) + goto alloc_err; + /* First allocate the context manager structure */ rc = qed_cxt_mngr_alloc(p_hwfn); if (rc) @@ -1468,6 +1771,14 @@ enum QED_ROCE_EDPM_MODE { QED_ROCE_EDPM_MODE_DISABLE = 2, }; +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) + return false; + + return true; +} + static int qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1537,13 +1848,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->wid_count = (u16) n_cpus; DP_INFO(p_hwfn, - "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", + "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, - ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? - "disabled" : "enabled"); + (!qed_edpm_enabled(p_hwfn)) ? + "disabled" : "enabled", PAGE_SIZE); if (rc) { DP_ERR(p_hwfn, @@ -1631,11 +1942,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 
1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); - /* Cleanup chip from previous driver if such remains exist */ - rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); - if (rc) - return rc; - /* Sanity check before the PF init sequence that uses DMAE */ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); if (rc) @@ -1679,17 +1985,15 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, return rc; } -static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 enable) +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable) { - u32 delay_idx = 0, val, set_val = enable ? 1 : 0; + u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; - /* Change PF in PXP */ - qed_wr(p_hwfn, p_ptt, - PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + /* Configure the PF's internal FID_enable for master transactions */ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); - /* wait until value is set - try for 1 second every 50us */ + /* Wait until value is set - try for 1 second every 50us */ for (delay_idx = 0; delay_idx < 20000; delay_idx++) { val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); @@ -1743,13 +2047,19 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, return 0; } +static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, + BIT(p_hwfn->abs_pf_id)); +} + int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; - int rc = 0, mfw_rc, i; + int rc = 0, i; u16 ether_type; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { @@ -1764,7 +2074,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = &cdev->hwfns[i]; /* If management didn't provide a default, set one of our own */ if (!p_hwfn->hw_info.mtu) { @@ -1777,9 +2087,6 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) continue; } - /* Enable DMAE in PXP */ - rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); - rc = qed_calc_hw_mode(p_hwfn); if (rc) return rc; @@ -1816,12 +2123,43 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) "Load request was sent. Load code: 0x%x\n", load_code); + /* Only relevant for recovery: + * Clear the indication after LOAD_REQ is responded by the MFW. + */ + cdev->recov_in_prog = false; + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); - p_hwfn->first_on_engine = (load_code == - FW_MSG_CODE_DRV_LOAD_ENGINE); + /* Clean up chip from previous driver if such remains exist. + * This is not needed when the PF is the first one on the + * engine, since afterwards we are going to init the FW. + */ + if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { + rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->rel_pf_id, false); + if (rc) { + DP_NOTICE(p_hwfn, "Final cleanup failed\n"); + goto load_err; + } + } + + /* Log and clear previous pglue_b errors if such exist */ + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + + /* Enable the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, + true); + if (rc) + goto load_err; + + /* Clear the pglue_b was_error indication. 
+ * In E4 it must be done after the BME and the internal + * FID_enable for the PF are set, since VDMs may cause the + * indication to be set again. + */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: @@ -1852,39 +2190,29 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) break; } - if (rc) + if (rc) { DP_NOTICE(p_hwfn, "init phase failed for loadcode 0x%x (rc %d)\n", - load_code, rc); + load_code, rc); + goto load_err; + } - /* ACK mfw regardless of success or failure of initialization */ - mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_LOAD_DONE, - 0, &load_code, ¶m); + rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); if (rc) return rc; - if (mfw_rc) { - DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); - return mfw_rc; - } - - /* Check if there is a DID mismatch between nvm-cfg/efuse */ - if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) - DP_NOTICE(p_hwfn, - "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "sending phony dcbx set command to trigger DCBx attention handling\n"); - mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_SET_DCBX, - 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, - &load_code, ¶m); - if (mfw_rc) { + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &resp, ¶m); + if (rc) { DP_NOTICE(p_hwfn, "Failed to send DCBX attention request\n"); - return mfw_rc; + return rc; } p_hwfn->hw_init_done = true; @@ -1933,6 +2261,12 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } return 0; + +load_err: + /* The MFW load lock should be released also when initialization fails. + */ + qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + return rc; } #define QED_HW_STOP_RETRY_LIMIT (10) @@ -1945,6 +2279,9 @@ static void qed_hw_timers_stop(struct qed_dev *cdev, qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + if (cdev->recov_in_prog) + return; + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) && @@ -2007,12 +2344,14 @@ int qed_hw_stop(struct qed_dev *cdev) p_hwfn->hw_init_done = false; /* Send unload command to MCP */ - rc = qed_mcp_unload_req(p_hwfn, p_ptt); - if (rc) { - DP_NOTICE(p_hwfn, - "Failed sending a UNLOAD_REQ command. rc = %d.\n", - rc); - rc2 = -EINVAL; + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_req(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_REQ command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } } qed_slowpath_irq_sync(p_hwfn); @@ -2054,27 +2393,31 @@ int qed_hw_stop(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); - qed_mcp_unload_done(p_hwfn, p_ptt); - if (rc) { - DP_NOTICE(p_hwfn, - "Failed sending a UNLOAD_DONE command. rc = %d.\n", - rc); - rc2 = -EINVAL; + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_done(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_DONE command. 
rc = %d.\n", + rc); + rc2 = -EINVAL; + } } } - if (IS_PF(cdev)) { + if (IS_PF(cdev) && !cdev->recov_in_prog) { p_hwfn = QED_LEADING_HWFN(cdev); p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; - /* Disable DMAE in PXP - in CMT, this should only be done for - * first hw-function, and only after all transactions have - * stopped for all active hw-functions. + /* Clear the PF's internal FID_enable in the PXP. + * In CMT this should only be done for first hw-function, and + * only after all transactions have stopped for all active + * hw-functions. */ - rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); if (rc) { DP_NOTICE(p_hwfn, - "qed_change_pci_hwfn failed. rc = %d.\n", rc); + "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", + rc); rc2 = -EINVAL; } } @@ -2174,9 +2517,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); } - /* Clean Previous errors if such exist */ - qed_wr(p_hwfn, p_hwfn->p_main_ptt, - PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); + /* Clean previous pglue_b errors if such exist */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, @@ -2910,55 +3252,43 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } -static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - u32 port_mode; - - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); - - if (port_mode < 3) { - p_hwfn->cdev->num_ports_in_engine = 1; - } else if (port_mode <= 5) { - p_hwfn->cdev->num_ports_in_engine = 2; - } else { - DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", - p_hwfn->cdev->num_ports_in_engine); - - /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engine = 1; - } -} - -static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port; - int i; - - p_hwfn->cdev->num_ports_in_engine = 0; + u32 addr, global_offsize, global_addr, port_mode; + struct qed_dev *cdev = p_hwfn->cdev; - for (i = 0; i < MAX_NUM_PORTS_K2; i++) { - port = qed_rd(p_hwfn, p_ptt, - CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); - if (port & 1) - p_hwfn->cdev->num_ports_in_engine++; + /* In CMT there is always only one port */ + if (cdev->num_hwfns > 1) { + cdev->num_ports_in_engine = 1; + cdev->num_ports = 1; + return; } - if (!p_hwfn->cdev->num_ports_in_engine) { - DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); - - /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engine = 1; + /* Determine the number of ports per engine */ + port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); + switch (port_mode) { + case 0x0: + cdev->num_ports_in_engine = 1; + break; + case 0x1: + cdev->num_ports_in_engine = 2; + break; + case 0x2: + cdev->num_ports_in_engine = 4; + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode); + cdev->num_ports_in_engine = 1; /* Default to something */ + break; } -} -static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - if (QED_IS_BB(p_hwfn->cdev)) - qed_hw_info_port_num_bb(p_hwfn, p_ptt); - else - qed_hw_info_port_num_ah(p_hwfn, p_ptt); + /* Get the total number of ports of the device */ + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, 
p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, max_ports); + cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr); } static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -2996,7 +3326,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, return rc; } - qed_hw_info_port_num(p_hwfn, p_ptt); + if (IS_LEAD_HWFN(p_hwfn)) + qed_hw_info_port_num(p_hwfn, p_ptt); qed_mcp_get_capabilities(p_hwfn, p_ptt); @@ -3112,6 +3443,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_doorbells, enum qed_pci_personality personality) { + struct qed_dev *cdev = p_hwfn->cdev; int rc = 0; /* Split PCI bars evenly between hwfns */ @@ -3164,7 +3496,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* Sending a mailbox to the MFW should be done after qed_get_hw_info() * is called as it sets the ports number in an engine. */ - if (IS_LEAD_HWFN(p_hwfn)) { + if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) { rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); if (rc) DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); @@ -4400,23 +4732,9 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); } -int qed_device_num_engines(struct qed_dev *cdev) -{ - return QED_IS_BB(cdev) ? 2 : 1; -} - -static int qed_device_num_ports(struct qed_dev *cdev) -{ - /* in CMT always only one port */ - if (cdev->num_hwfns > 1) - return 1; - - return cdev->num_ports_in_engine * qed_device_num_engines(cdev); -} - -int qed_device_get_port_id(struct qed_dev *cdev) +int qed_device_num_ports(struct qed_dev *cdev) { - return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); + return cdev->num_ports; } void qed_set_fw_mac_addr(__le16 *fw_msb, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index defdda1ffaa2..e4b4e3b78e8a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -472,6 +472,46 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); +/** + * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * + * @param p_hwfn + * @param p_ptt + * @param b_enable - true/false + * + * @return int + */ +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable); + +/** + * @brief db_recovery_add - add doorbell information to the doorbell + * recovery mechanism. + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address of where db_data is stored + * @param db_width - doorbell is 32b pr 64b + * @param db_space - doorbell recovery addresses are user or kernel space + */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space); + +/** + * @brief db_recovery_del - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address where db_data is stored. Serves as key for the + * entry to delete. 
+ */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data); + const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b38e12c9de9d..37edaa847512 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12655,6 +12655,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 #define DRV_MB_PARAM_NVM_LEN_OFFSET 24 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 @@ -12795,6 +12796,7 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 /* get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 @@ -12814,6 +12816,11 @@ struct public_drv_mb { union drv_union_data union_data; }; +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LINK_CHANGE, MFW_DRV_MSG_FLR_FW_ACK_FAILED, @@ -12821,7 +12828,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LLDP_DATA_UPDATED, MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_RESERVED4, + MFW_DRV_MSG_ERROR_RECOVERY, MFW_DRV_MSG_BW_UPDATE, MFW_DRV_MSG_S_TAG_UPDATE, MFW_DRV_MSG_GET_LAN_STATS, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 70504dcf4087..72ec1c6bdf70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -703,6 +703,17 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, int qed_status = 0; u32 offset = 0; + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "Recovery is in progress. 
Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n", + src_addr, src_type, dst_addr, dst_type, + size_in_dwords); + + /* Let the flow complete w/o any error handling */ + return 0; + } + qed_dmae_opcode(p_hwfn, (src_type == QED_DMAE_ADDRESS_GRC), (dst_type == QED_DMAE_ADDRESS_GRC), diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b22f464ea3fa..8848d5bed6e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -255,138 +255,298 @@ out: #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) + +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { u32 tmp; - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_WR_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & PGLUE_ATTENTION_VALID) { u32 addr_lo, addr_hi, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_INFO(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + DP_NOTICE(p_hwfn, + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_RD_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); if (tmp & PGLUE_ATTENTION_RD_VALID) { u32 addr_lo, addr_hi, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS); - DP_INFO(p_hwfn, - "Illegal read by chip from [%08x:%08x] blocked.\n" - " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 - : 0, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 - : 0); + DP_NOTICE(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); + DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { u32 addr_hi, addr_lo; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); - DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n", - tmp, addr_hi, addr_lo); + DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); if (tmp & PGLUE_ATTENTION_ILT_VALID) { u32 addr_hi, addr_lo, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS); - DP_INFO(p_hwfn, - "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", - details, tmp, addr_hi, addr_lo); + DP_NOTICE(p_hwfn, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); } /* Clear the indications */ - qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, - 
PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); return 0; } -#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) -#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) -#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) -#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) -static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { - u32 reason; + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); +} - reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & - QED_DORQ_ATTENTION_REASON_MASK; - if (reason) { - u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - DORQ_REG_DB_DROP_DETAILS); +#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) +#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) - DP_INFO(p_hwfn->cdev, - "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", - qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - DORQ_REG_DB_DROP_DETAILS_ADDRESS), - (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), - GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, - reason); +#define QED_DB_REC_COUNT 1000 +#define QED_DB_REC_INTERVAL 100 + +static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 count = QED_DB_REC_COUNT; + u32 usage = 1; + + /* Flush any pending (e)dpms as they may never arrive */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + + /* wait for usage to zero or count to run out. This is necessary since + * EDPM doorbell transactions can take multiple 64b cycles, and as such + * can "split" over the pci. Possibly, the doorbell drop can happen with + * half an EDPM in the queue and other half dropped. Another EDPM + * doorbell to the same address (from doorbell recovery mechanism or + * from the doorbelling entity) could have first half dropped and second + * half interpreted as continuation of the first. To prevent such + * malformed doorbells from reaching the device, flush the queue before + * releasing the overflow sticky indication. + */ + while (count-- && usage) { + usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); + udelay(QED_DB_REC_INTERVAL); + } + + /* should have been depleted by now */ + if (usage) { + DP_NOTICE(p_hwfn->cdev, + "DB recovery: doorbell usage failed to zero after %d usec. 
usage was %x\n", + QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); + return -EBUSY; + } + + return 0; +} + +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 attn_ovfl, cur_ovfl; + int rc; + + attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, + &p_hwfn->db_recovery_info.overflow); + cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!cur_ovfl && !attn_ovfl) + return 0; + + DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", + attn_ovfl, cur_ovfl); + + if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + return rc; } + /* Release overflow sticky indication (stop silently dropping everything) */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); + + /* Repeat all last doorbells (doorbell drop recovery) */ + qed_db_recovery_execute(p_hwfn); + + return 0; +} + +static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + u32 overflow; + int rc; + + overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!overflow) + goto out; + + /* Run PF doorbell recovery in next periodic handler */ + set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); + + if (!p_hwfn->db_bar_no_edpm) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + goto out; + } + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); +out: + /* Schedule the handler even if overflow was not detected */ + qed_periodic_db_rec_start(p_hwfn); +} + +static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) +{ + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + + /* int_sts may be zero since all PFs were interrupted for doorbell + * overflow but another one already handled it. Can abort here. If + * This PF also requires overflow recovery we will be interrupted again. + * The masked almost full indication may also be set. Ignoring. + */ + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) + return 0; + + DP_NOTICE(p_hwfn->cdev, "DORQ attention. 
int_sts was %x\n", int_sts); + + /* check if db_drop or overflow happened */ + if (int_sts & (DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { + /* Obtain data about db drop/overflow */ + first_drop_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_REASON) & + QED_DORQ_ATTENTION_REASON_MASK; + details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); + address = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS); + all_drops_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_REASON); + + /* Log info */ + DP_NOTICE(p_hwfn->cdev, + "Doorbell drop occurred\n" + "Address\t\t0x%08x\t(second BAR address)\n" + "FID\t\t0x%04x\t\t(Opaque FID)\n" + "Size\t\t0x%04x\t\t(in bytes)\n" + "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" + "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", + address, + GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), + GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, + first_drop_reason, all_drops_reason); + + /* Clear the doorbell drop details and prepare for next drop */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); + + /* Mark interrupt as handled (note: even if drop was due to a different + * reason than overflow we mark as handled) + */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_INT_STS_WR, + DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); + + /* If there are no indications other than drop indications, success */ + if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | + DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) + return 0; + } + + /* Some other indication was present - non recoverable */ + DP_INFO(p_hwfn, "DORQ fatal attention\n"); + return -EINVAL; } +static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +{ + p_hwfn->db_recovery_info.dorq_attn = true; + qed_dorq_attn_overflow(p_hwfn); + + return qed_dorq_attn_int_sts(p_hwfn); +} + +static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->db_recovery_info.dorq_attn) + goto out; + + /* Call DORQ callback if the attention was missed */ + qed_dorq_attn_cb(p_hwfn); +out: + p_hwfn->db_recovery_info.dorq_attn = false; +} + /* Instead of major changes to the data-structure, we have a some 'special' * identifiers for sources that changed meaning between adapters. */ @@ -422,7 +582,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { {"PGLUE misc_flr", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"PGLUE B RBC", ATTENTION_PAR_INT, - qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, + qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, {"PGLUE misc_mctp", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, @@ -960,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, } } + /* Handle missed DORQ attention */ + qed_dorq_attn_handler(p_hwfn); + /* Clear IGU indication for the deasserted bits */ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_IGU_CMD + diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 54b4ee0acfd7..d473b522afc5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -190,6 +190,16 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); +/** + * @brief - Doorbell Recovery handler. + * Run doorbell recovery in case of PF overflow (and flush DORQ if + * needed). 
+ * + * @param p_hwfn + * @param p_ptt + */ +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + #define QED_CAU_DEF_RX_TIMER_RES 0 #define QED_CAU_DEF_TX_TIMER_RES 0 @@ -421,4 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); @@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; - u32 mpa_buff_size; + u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; @@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ @@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; } + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, - QED_IWARP_MAX_SYN_PKT_SIZE, + buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; @@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, - mpa_buff_size, + buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; @@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) -#define QED_IWARP_MAX_SYN_PKT_SIZE (128) #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) #define QED_IWARP_MAX_OOO (16) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 67c02ea93906..57641728df69 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, (!!(accept_filter & 
QED_ACCEPT_MCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); @@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, return rc; } + if (p_params->update_ctl_frame_check) { + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; + } + /* Update mcast bins for VFs, PF doesn't use this functionality */ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); @@ -1889,6 +1898,7 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) : NULL; + bool b_get_port_stats; if (IS_PF(cdev)) { /* The main vport index is relative first */ @@ -1903,8 +1913,9 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, continue; } + b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn); __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, - IS_PF(cdev) ? true : false); + b_get_port_stats); out: if (IS_PF(cdev) && p_ptt) @@ -2207,7 +2218,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 num_queues = 0; /* Since the feature controls only queue-zones, - * make sure we have the contexts [rx, tx, xdp] to + * make sure we have the contexts [rx, xdp, tcs] to * match. */ for_each_hwfn(cdev, i) { @@ -2217,7 +2228,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 cids; cids = hwfn->pf_params.eth_pf_params.num_cons; - num_queues += min_t(u16, l2_queues, cids / 3); + cids /= (2 + info->num_tc); + num_queues += min_t(u16, l2_queues, cids); } /* queues might theoretically be >256, but interrupts' @@ -2688,7 +2700,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; - accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; @@ -2860,7 +2873,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) p_hwfn = p_cid->p_owner; rc = qed_get_queue_coalesce(p_hwfn, coal, handle); if (rc) - DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Unable to read queue coalescing\n"); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8d80f1095d17..7127d5aaac42 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -219,6 +219,9 @@ struct qed_sp_vport_update_params { struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; struct qed_sge_tpa_params *sge_tpa_params; + u8 update_ctl_frame_check; + u8 mac_chk_en; + u8 ethtype_chk_en; }; int qed_sp_vport_update(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c6f4bab67a5f..b5f419b71287 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1085,7 +1085,14 @@ static int 
qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; - return qed_spq_post(p_hwfn, p_ent, NULL); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr, + &p_tx->db_msg, DB_REC_WIDTH_32B, + DB_REC_KERNEL); + return rc; } static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -1119,9 +1126,11 @@ static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -1542,6 +1551,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(p_ll2_conn->cid, DQ_DEMS_LEGACY); + /* prepare db data */ + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_TX_BD_PROD_CMD); + p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); if (rc) @@ -1603,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); rx_prod.bd_prod = cpu_to_le16(bd_prod); rx_prod.cqe_prod = cpu_to_le16(cq_prod); + + /* Make sure chain element is updated before ringing the doorbell */ + dma_wmb(); + DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); } @@ -1780,7 +1800,6 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_ll2_tx_packet *p_pkt = NULL; - struct core_db_data db_msg = { 0, 0, 0 }; u16 bd_prod; /* If there are missing BDs, don't do anything now */ @@ -1809,24 +1828,19 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); } - SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); - SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, - DQ_XCM_CORE_TX_BD_PROD_CMD); - db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - db_msg.spq_prod = cpu_to_le16(bd_prod); + p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); /* Make sure the BDs data is updated before ringing the doorbell */ wmb(); - DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg)); + DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, p_ll2_conn->cid, - p_ll2_conn->input.conn_type, db_msg.spq_prod); + p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); } int qed_ll2_prepare_tx_packet(void *cxt, @@ -2437,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; + u8 flags = 0, nr_frags; int rc = -EINVAL, i; dma_addr_t mapping; u16 vlan = 0; - u8 flags = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } - if (1 + 
skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { + /* Cache number of fragments from SKB since SKB may be freed by + * the completion routine after calling qed_ll2_prepare_tx_packet() + */ + nr_frags = skb_shinfo(skb)->nr_frags; + + if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", - 1 + skb_shinfo(skb)->nr_frags); + 1 + nr_frags); return -EINVAL; } @@ -2471,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, } memset(&pkt, 0, sizeof(pkt)); - pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; + pkt.num_of_bds = 1 + nr_frags; pkt.vlan = vlan; pkt.bd_flags = flags; pkt.tx_dest = QED_LL2_TX_DEST_NW; @@ -2482,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) pkt.remove_stag = true; + /* qed_ll2_prepare_tx_packet() may actually send the packet if + * there are no fragments in the skb and subsequently the completion + * routine may run and free the SKB, so no dereferencing the SKB + * beyond this point unless skb has any fragments. + */ rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); if (rc) goto err; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 1a5c1ae01474..5f01fbd3c073 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -103,6 +103,7 @@ struct qed_ll2_tx_queue { struct qed_ll2_tx_packet cur_completing_packet; u16 cur_completing_bd_idx; void __iomem *doorbell_addr; + struct core_db_data db_msg; u16 bds_idx; u16 cur_send_frag_num; u16 cur_completing_frag_num; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index fff7f04d4525..6de23b56b294 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -281,6 +281,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; + dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, @@ -359,6 +361,8 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, qed_init_dp(cdev, params->dp_module, params->dp_level); + cdev->recov_in_prog = params->recov_in_prog; + rc = qed_init_pci(cdev, pdev); if (rc) { DP_ERR(cdev, "init pci failed\n"); @@ -966,9 +970,47 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } +#define QED_PERIODIC_DB_REC_COUNT 10 +#define QED_PERIODIC_DB_REC_INTERVAL_MS 100 +#define QED_PERIODIC_DB_REC_INTERVAL \ + msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) +#define QED_PERIODIC_DB_REC_WAIT_COUNT 10 +#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ + (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) + +static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, + enum qed_slowpath_wq_flag wq_flag, + unsigned long delay) +{ + if (!hwfn->slowpath_wq_active) + return -EINVAL; + + /* Memory barrier for setting atomic bit */ + smp_mb__before_atomic(); + set_bit(wq_flag, &hwfn->slowpath_task_flags); + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); + + return 0; +} 
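The LL2 changes above and the SPQ changes later in this patch follow the same doorbell-recovery pattern: when a queue is started, its doorbell address is registered together with a shadow copy of the last doorbell data, and the entry is removed again when the queue is stopped, so a dropped doorbell can later be replayed from the shadow value. A minimal sketch of that pairing, reusing the qed_db_recovery_add()/qed_db_recovery_del() calls as they appear in this patch; the qedx_queue type and its start/stop helpers are hypothetical placeholders:

/* Hypothetical queue wrapper; only the db recovery calls mirror the patch. */
struct qedx_queue {
	void __iomem *doorbell_addr;	/* mapped doorbell register */
	struct core_db_data db_data;	/* shadow of the last doorbell value */
};

static int qedx_queue_start(struct qed_hwfn *p_hwfn, struct qedx_queue *q)
{
	/* Register the doorbell so it can be replayed after an overflow */
	return qed_db_recovery_add(p_hwfn->cdev, q->doorbell_addr, &q->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
}

static void qedx_queue_stop(struct qed_hwfn *p_hwfn, struct qedx_queue *q)
{
	/* Remove the entry before the doorbell data goes out of scope */
	qed_db_recovery_del(p_hwfn->cdev, q->doorbell_addr, &q->db_data);
}

The shadow data must stay allocated for as long as the entry is registered, which is why this patch moves db_msg into struct qed_ll2_tx_queue and db_data into struct qed_spq instead of building them on the stack at doorbell time.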
+ +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) +{ + /* Reset periodic Doorbell Recovery counter */ + p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; + + /* Don't schedule periodic Doorbell Recovery if already scheduled */ + if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &p_hwfn->slowpath_task_flags)) + return; + + qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); +} + static void qed_slowpath_wq_stop(struct qed_dev *cdev) { - int i; + int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; if (IS_VF(cdev)) return; @@ -977,6 +1019,15 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev) if (!cdev->hwfns[i].slowpath_wq) continue; + /* Stop queuing new delayed works */ + cdev->hwfns[i].slowpath_wq_active = false; + + /* Wait until the last periodic doorbell recovery is executed */ + while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &cdev->hwfns[i].slowpath_task_flags) && + sleep_count--) + msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); + flush_workqueue(cdev->hwfns[i].slowpath_wq); destroy_workqueue(cdev->hwfns[i].slowpath_wq); } @@ -989,7 +1040,10 @@ static void qed_slowpath_task(struct work_struct *work) struct qed_ptt *ptt = qed_ptt_acquire(hwfn); if (!ptt) { - queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + if (hwfn->slowpath_wq_active) + queue_delayed_work(hwfn->slowpath_wq, + &hwfn->slowpath_task, 0); + return; } @@ -997,6 +1051,15 @@ static void qed_slowpath_task(struct work_struct *work) &hwfn->slowpath_task_flags)) qed_mfw_process_tlv_req(hwfn, ptt); + if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &hwfn->slowpath_task_flags)) { + qed_db_rec_handler(hwfn, ptt); + if (hwfn->periodic_db_rec_count--) + qed_slowpath_delayed_work(hwfn, + QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); + } + qed_ptt_release(hwfn, ptt); } @@ -1023,6 +1086,7 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev) } INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + hwfn->slowpath_wq_active = true; } return 0; @@ -1939,21 +2003,30 @@ exit: * 0B | 0x3 [command index] | * 4B | b'0: check_response? 
| b'1-31 reserved | * 8B | File-type | reserved | + * 12B | Image length in bytes | * \----------------------------------------------------------------------/ * Start a new file of the provided type */ static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, const u8 **data, bool *check_resp) { + u32 file_type, file_size = 0; int rc; *data += 4; *check_resp = !!(**data & BIT(0)); *data += 4; + file_type = **data; DP_VERBOSE(cdev, NETIF_MSG_DRV, - "About to start a new file of type %02x\n", **data); - rc = qed_mcp_nvm_put_file_begin(cdev, **data); + "About to start a new file of type %02x\n", file_type); + if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { + *data += 4; + file_size = *((u32 *)(*data)); + } + + rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, + (u8 *)(&file_size), 4); *data += 4; return rc; @@ -2134,6 +2207,15 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, return qed_mcp_get_nvm_image(hwfn, type, buf, len); } +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) +{ + struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; + void *cookie = p_hwfn->cdev->ops_cookie; + + if (ops && ops->schedule_recovery_handler) + ops->schedule_recovery_handler(cookie); +} + static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle) { @@ -2157,6 +2239,23 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +static int qed_recovery_process(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + rc = qed_start_recovery_process(p_hwfn, p_ptt); + + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static int qed_update_wol(struct qed_dev *cdev, bool enabled) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -2311,10 +2410,14 @@ const struct qed_common_ops qed_common_ops_pass = { .nvm_get_image = &qed_nvm_get_image, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, + .recovery_process = &qed_recovery_process, + .recovery_prolog = &qed_recovery_prolog, .update_drv_state = &qed_update_drv_state, .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .db_recovery_add = &qed_db_recovery_add, + .db_recovery_del = &qed_db_recovery_del, .read_module_eeprom = &qed_read_module_eeprom, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index a96364df4320..cc27fd60d689 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1070,6 +1070,27 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, &param); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a LOAD_DONE command, rc = %d\n", rc); + return rc; + } + + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, + "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); + + return 0; +} + int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params; @@ -1528,6 +1549,60 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return 0; } +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); + + proc_kill_cnt = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. + */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. + */ + if (p_hwfn != QED_LEADING_HWFN(cdev)) + return; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Ignoring the indication since a recovery process is already in progress\n"); + return; + } + + cdev->recov_in_prog = true; + + proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); + + qed_schedule_recovery_handler(p_hwfn); +} + static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum MFW_DRV_MSG_TYPE type) @@ -1619,7 +1694,7 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_sp_pf_update_stag(p_hwfn); } - DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); /* Acknowledge the MFW */ @@ -1641,7 +1716,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> OEM_CFG_CHANNEL_TYPE_OFFSET; if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) - DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); + DP_NOTICE(p_hwfn, + "Incorrect UFP Channel type %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; if (val == OEM_CFG_SCHED_TYPE_ETS) { @@ -1650,7 +1727,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; } else { p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; - DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); + DP_NOTICE(p_hwfn, + "Unknown UFP scheduling mode %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); @@ -1665,13 +1744,15 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; } else { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; - DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); + DP_NOTICE(p_hwfn, + "Unknown Host priority control %d port_id 0x%02x\n", + val, 
MFW_PORT(p_hwfn)); } DP_NOTICE(p_hwfn, - "UFP shmem config: mode = %d tc = %d pri_type = %d\n", - p_hwfn->ufp_info.mode, - p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); + "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n", + p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, + p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn)); } static int @@ -1752,6 +1833,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_ERROR_RECOVERY: + qed_mcp_handle_process_kill(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: @@ -2297,6 +2381,43 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Avoid triggering a recovery since such a process is already in progress\n"); + return -EAGAIN; + } + + DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return 0; +} + +#define QED_RECOVERY_PROLOG_SLEEP_MS 100 + +int qed_recovery_prolog(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + int rc; + + /* Allow ongoing PCIe transactions to complete */ + msleep(QED_RECOVERY_PROLOG_SLEEP_MS); + + /* Clear the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc) + DP_NOTICE(p_hwfn, + "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", + rc); + + return rc; +} + static int qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num) @@ -2739,24 +2860,6 @@ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) return 0; } -int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr) -{ - struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt; - u32 resp, param; - int rc; - - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - return -EBUSY; - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, - &resp, &param); - cdev->mcp_nvm_resp = resp; - qed_ptt_release(p_hwfn, p_ptt); - - return rc; -} - int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { @@ -2770,6 +2873,9 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, return -EBUSY; switch (cmd) { + case QED_PUT_FILE_BEGIN: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; + break; case QED_PUT_FILE_DATA: nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; break; @@ -2782,10 +2888,14 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, goto out; } + buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); while (buf_idx < len) { - buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); - nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | - addr) + buf_idx; + if (cmd == QED_PUT_FILE_BEGIN) + nvm_offset = addr; + else + nvm_offset = ((buf_size << + DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + + buf_idx; rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, &resp, &param, buf_size, (u32 *)&p_buf[buf_idx]); @@ -2810,7 +2920,19 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) usleep_range(1000, 2000); - buf_idx += buf_size; + /* For MBI upgrade, MFW response includes the next buffer offset + * to be delivered to MFW. 
+ */ + if (param && cmd == QED_PUT_FILE_DATA) { + buf_idx = QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + } else { + buf_idx += buf_size; + buf_size = min_t(u32, (len - buf_idx), + MCP_DRV_NVM_BUF_LEN); + } } cdev->mcp_nvm_resp = resp; @@ -3532,6 +3654,12 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, } } +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); +} + int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 1adfe52b3905..261c1a392e2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -441,6 +441,38 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** + * @brief Read the MFW process kill counter + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Trigger a recovery process + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief A recovery handler must call this function as its first step. + * It is assumed that the handler is not run from an interrupt context. + * + * @param cdev + * @param p_ptt + * + * @return int + */ +int qed_recovery_prolog(struct qed_dev *cdev); + +/** * @brief Notify MFW about the change in base device properties * * @param p_hwfn @@ -543,16 +575,6 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Put file begin - * - * @param cdev - * @param addr - nvm offset - * - * @return int - 0 - operation was successful. - */ -int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr); - -/** * @brief Check latest response * * @param cdev @@ -669,10 +691,6 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); rel_pfid) #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) -#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ - ((_p_hwfn)->cdev->num_ports_in_engine * \ - qed_device_num_engines((_p_hwfn)->cdev))) - struct qed_mcp_info { /* List for mailbox commands which were sent and wait for a response */ struct list_head cmd_list; @@ -811,6 +829,16 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_load_req_params *p_params); /** + * @brief Sends a LOAD_DONE message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return int - 0 - Operation was successful. + */ +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** * @brief Sends a UNLOAD_REQ message to the MFW * * @param p_hwfn @@ -1116,6 +1144,16 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent); + +/** + * @brief - Return whether management firmware support smart AN + * + * @param p_hwfn + * + * @return bool - true if feature is supported. 
+ */ +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); + /** * @brief Learn of supported MFW features; To be done during early init * diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 5a90d69dc2f8..1302b308bd87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -47,7 +47,7 @@ static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { - switch (qed_device_get_port_id(p_hwfn->cdev)) { + switch (MFW_PORT(p_hwfn)) { case 0: return QED_RESC_LOCK_PTP_PORT0; case 1: diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 2440970882c4..5ce825ca5f24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -518,6 +518,8 @@ 0x180824UL #define MISC_REG_AEU_GENERAL_ATTN_0 \ 0x008400UL +#define MISC_REG_AEU_GENERAL_ATTN_35 \ + 0x00848cUL #define CAU_REG_SB_ADDR_MEMORY \ 0x1c8000UL #define CAU_REG_SB_VAR_MEMORY \ @@ -1243,6 +1245,56 @@ 0x1701534UL #define TSEM_REG_DBG_FORCE_FRAME \ 0x1701538UL +#define DORQ_REG_PF_USAGE_CNT \ + 0x1009c0UL +#define DORQ_REG_PF_OVFL_STICKY \ + 0x1009d0UL +#define DORQ_REG_DPM_FORCE_ABORT \ + 0x1009d8UL +#define DORQ_REG_INT_STS \ + 0x100180UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR \ + (0x1UL << 0) +#define DORQ_REG_INT_STS_WR \ + 0x100188UL +#define DORQ_REG_DB_DROP_DETAILS_REL \ + 0x100a28UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \ + 0 +#define DORQ_REG_INT_STS_DB_DROP \ + (0x1UL << 1) +#define DORQ_REG_INT_STS_DB_DROP_SHIFT \ + 1 +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \ + (0x1UL << 2) +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \ + 2 +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\ + (0x1UL << 3) +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \ + 3 +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \ + (0x1UL << 4) +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \ + 4 +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \ + (0x1UL << 5) +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \ + 5 +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \ + (0x1UL << 6) +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \ + 6 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \ + (0x1UL << 7) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \ + 7 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \ + (0x1UL << 8) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \ + 8 +#define DORQ_REG_DB_DROP_DETAILS_REASON \ + 0x100a20UL #define MSEM_REG_DBG_SELECT \ 0x1801528UL #define MSEM_REG_DBG_DWORD_ENABLE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 3157c0d99441..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -227,7 +227,9 @@ struct qed_spq { u32 comp_count; u32 cid; - qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; + u32 db_addr_offset; + struct core_db_data db_data; + qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; }; /** @@ -380,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); * @param p_hwfn */ void qed_consq_free(struct qed_hwfn *p_hwfn); +int qed_spq_pend_post(struct qed_hwfn *p_hwfn); /** * @file diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -604,6 
+604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) p_ent->ramrod.pf_update.update_mf_vlan_flag = true; p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + p_ent->ramrod.pf_update.mf_vlan |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); return qed_spq_post(p_hwfn, p_ent, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0a9c5bb0fa48..79b311b86f66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -252,9 +252,9 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct core_db_data *p_db_data = &p_spq->db_data; u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; - struct core_db_data db; p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); @@ -266,27 +266,22 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, *elem = p_ent->elem; /* struct assignment */ /* send a doorbell on the slow hwfn session */ - memset(&db, 0, sizeof(db)); - SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); - SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, - DQ_XCM_CORE_SPQ_PROD_CMD); - db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); + p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); /* make sure the SPQE is updated before the doorbell */ wmb(); - DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db); + DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); /* make sure doorbell is rang */ wmb(); DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", - qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), - p_spq->cid, db.params, db.agg_flags, - qed_chain_get_prod_idx(p_chain)); + p_spq->db_addr_offset, + p_spq->cid, + p_db_data->params, + p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain)); return 0; } @@ -402,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + /* Attempt to post pending requests */ + spin_lock_bh(&p_hwfn->p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_hwfn->p_spq->lock); + return rc; } @@ -490,8 +490,11 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_virt = NULL; + struct core_db_data *p_db_data; + void __iomem *db_addr; dma_addr_t p_phys = 0; u32 i, capacity; + int rc; INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@ -528,6 +531,25 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) /* reset the chain itself */ qed_chain_reset(&p_spq->chain); + + /* Initialize the address/data of the SPQ doorbell */ + p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY); + p_db_data = &p_spq->db_data; + memset(p_db_data, 0, sizeof(*p_db_data)); + SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* Register the SPQ doorbell with the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + 
p_spq->db_addr_offset); + rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc) + DP_INFO(p_hwfn, + "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n"); } int qed_spq_alloc(struct qed_hwfn *p_hwfn) @@ -575,11 +597,17 @@ spq_allocate_fail: void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + void __iomem *db_addr; u32 capacity; if (!p_spq) return; + /* Delete the SPQ doorbell from the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data); + if (p_spq->p_virt) { capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -744,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, return 0; } -static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +int qed_spq_pend_post(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -767,6 +795,17 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) SPQ_HIGH_PRI_RESERVE_DEFAULT); } +static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent, + u8 *fw_return_code) +{ + if (!fw_return_code) + return; + + if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE || + p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP) + *fw_return_code = RDMA_RETURN_OK; +} + /* Avoid overriding of SPQ entries when getting out-of-order completions, by * marking the completions in a bitmap and increasing the chain consumer only * for the first successive completed entries. @@ -802,6 +841,17 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Recovery is in progress. 
Skip spq post [cmd %02x protocol %02x]\n", + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + + /* Let the flow complete w/o any error handling */ + qed_spq_recov_set_ret_code(p_ent, fw_return_code); + return 0; + } + /* Complete the entry */ rc = qed_spq_fill_entry(p_hwfn, p_ent); @@ -882,7 +932,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *tmp; struct qed_spq_entry *found = NULL; - int rc; if (!p_hwfn) return -EINVAL; @@ -940,12 +989,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, */ qed_spq_return_entry(p_hwfn, found); - /* Attempt to post pending requests */ - spin_lock_bh(&p_spq->lock); - rc = qed_spq_pend_post(p_hwfn); - spin_unlock_bh(&p_spq->lock); - - return rc; + return 0; } int qed_consq_alloc(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ca6290fa0f30..2f318aaf2b05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; } else { DP_INFO(p_hwfn, - "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", vf->abs_vf_id, req->vfdev_info.eth_fp_hsi_major, req->vfdev_info.eth_fp_hsi_minor, @@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; - params.check_mac = true; + + /* Non trusted VFs should enable control frame filtering */ + params.check_mac = !vf->p_vf_info.is_trusted_configured; rc = qed_sp_eth_vport_start(p_hwfn, &params); if (rc) { @@ -4447,6 +4449,13 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) pci_disable_sriov(cdev->pdev); + if (cdev->recov_in_prog) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Skip SRIOV disable operations in the device since a recovery is in progress\n"); + goto out; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_ptt *ptt = qed_ptt_acquire(hwfn); @@ -4486,7 +4495,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) qed_ptt_release(hwfn, ptt); } - +out: qed_iov_set_vfs_to_disable(cdev, false); return 0; @@ -5130,6 +5139,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; + params.update_ctl_frame_check = 1; + params.mac_chk_en = !vf_info->is_trusted_configured; + if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; flags->rx_accept_filter = vf_info->rx_accept_mode; @@ -5147,7 +5159,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) } if (flags->update_rx_mode_config || - flags->update_tx_mode_config) + flags->update_tx_mode_config || + params.update_ctl_frame_check) qed_sp_vport_update(hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index b6cccf44bf40..5dda547772c1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) struct pfvf_acquire_resp_tlv *resp = 
&p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vf_pf_resc_request *p_resc; + u8 retry_cnt = VF_ACQUIRE_THRESH; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; @@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + /* Re-try acquire in case of vf-pf hw channel timeout */ + if (retry_cnt && rc == -EBUSY) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF retrying to acquire due to VPC timeout\n"); + retry_cnt--; + continue; + } + if (rc) goto exit; |