Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 360
1 file changed, 258 insertions, 102 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e84613a47379..9608cb49a11c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -52,6 +52,7 @@
 #include "bnxt_hsi.h"
 #include "bnxt.h"
+#include "bnxt_ulp.h"
 #include "bnxt_sriov.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
@@ -1528,12 +1529,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
 		break;
 	default:
-		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
-			   event_id);
 		goto async_event_process_exit;
 	}
 	schedule_work(&bp->sp_task);
 async_event_process_exit:
+	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
 }
 
@@ -3117,27 +3117,46 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
 	return rc;
 }
 
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+				     int bmap_size)
 {
 	struct hwrm_func_drv_rgtr_input req = {0};
-	int i;
 	DECLARE_BITMAP(async_events_bmap, 256);
 	u32 *events = (u32 *)async_events_bmap;
+	int i;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
 
 	req.enables =
-		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
-			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
-			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
 
+	if (bmap && bmap_size) {
+		for (i = 0; i < bmap_size; i++) {
+			if (test_bit(i, bmap))
+				__set_bit(i, async_events_bmap);
+		}
+	}
+
 	for (i = 0; i < 8; i++)
 		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
 
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+	struct hwrm_func_drv_rgtr_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+	req.enables =
+		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+			    FUNC_DRV_RGTR_REQ_ENABLES_VER);
+
 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
 	req.ver_maj = DRV_VER_MAJ;
 	req.ver_min = DRV_VER_MIN;
@@ -3146,6 +3165,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 	if (BNXT_PF(bp)) {
 		DECLARE_BITMAP(vf_req_snif_bmap, 256);
 		u32 *data = (u32 *)vf_req_snif_bmap;
+		int i;
 
 		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
@@ -3527,7 +3547,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
 	return rc;
 }
 
-static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 {
 	unsigned int ring = 0, grp_idx;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -3575,6 +3595,9 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 #endif
 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+		req.flags |=
+			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
 
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
@@ -4152,7 +4175,7 @@ func_qcfg_exit:
 	return rc;
 }
 
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
 	struct hwrm_func_qcaps_input req = {0};
@@ -4166,6 +4189,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (rc)
 		goto hwrm_func_qcaps_exit;
 
+	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
 	bp->tx_push_thresh = 0;
 	if (resp->flags &
 	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
@@ -4743,16 +4771,134 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 	return 0;
 }
 
-static int bnxt_setup_msix(struct bnxt *bp)
+static void bnxt_setup_msix(struct bnxt *bp)
 {
-	struct msix_entry *msix_ent;
+	const int len = sizeof(bp->irq_tbl[0].name);
 	struct net_device *dev = bp->dev;
-	int i, total_vecs, rc = 0, min = 1;
+	int tcs, i;
+
+	tcs = netdev_get_num_tc(dev);
+	if (tcs > 1) {
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+		if (bp->tx_nr_rings_per_tc == 0) {
+			netdev_reset_tc(dev);
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		} else {
+			int i, off, count;
+
+			bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+			for (i = 0; i < tcs; i++) {
+				count = bp->tx_nr_rings_per_tc;
+				off = i * count;
+				netdev_set_tc_queue(dev, i, count, off);
+			}
+		}
+	}
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		char *attr;
+
+		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+			attr = "TxRx";
+		else if (i < bp->rx_nr_rings)
+			attr = "rx";
+		else
+			attr = "tx";
+
+		snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+			 i);
+		bp->irq_tbl[i].handler = bnxt_msix;
+	}
+}
+
+static void bnxt_setup_inta(struct bnxt *bp)
+{
 	const int len = sizeof(bp->irq_tbl[0].name);
 
-	bp->flags &= ~BNXT_FLAG_USING_MSIX;
-	total_vecs = bp->cp_nr_rings;
+	if (netdev_get_num_tc(bp->dev))
+		netdev_reset_tc(bp->dev);
 
+	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
+		 0);
+	bp->irq_tbl[0].handler = bnxt_inta;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+	int rc;
+
+	if (bp->flags & BNXT_FLAG_USING_MSIX)
+		bnxt_setup_msix(bp);
+	else
+		bnxt_setup_inta(bp);
+
+	rc = bnxt_set_real_num_queues(bp);
+	return rc;
+}
+
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_stat_ctxs;
+#endif
+	return bp->pf.max_stat_ctxs;
+}
+
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		bp->vf.max_stat_ctxs = max;
+	else
+#endif
+		bp->pf.max_stat_ctxs = max;
+}
+
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_cp_rings;
+#endif
+	return bp->pf.max_cp_rings;
+}
+
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		bp->vf.max_cp_rings = max;
+	else
+#endif
+		bp->pf.max_cp_rings = max;
+}
+
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_irqs;
+#endif
+	return bp->pf.max_irqs;
+}
+
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		bp->vf.max_irqs = max_irqs;
+	else
+#endif
+		bp->pf.max_irqs = max_irqs;
+}
+
+static int bnxt_init_msix(struct bnxt *bp)
+{
+	int i, total_vecs, rc = 0, min = 1;
+	struct msix_entry *msix_ent;
+
+	total_vecs = bnxt_get_max_func_irqs(bp);
 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
 	if (!msix_ent)
 		return -ENOMEM;
@@ -4773,8 +4919,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
 
 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
 	if (bp->irq_tbl) {
-		int tcs;
+		for (i = 0; i < total_vecs; i++)
+			bp->irq_tbl[i].vector = msix_ent[i].vector;
 
+		bp->total_irqs = total_vecs;
 		/* Trim rings based upon num of vectors allocated */
 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
 				     total_vecs, min == 1);
@@ -4782,43 +4930,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
 			goto msix_setup_exit;
 
 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
-		tcs = netdev_get_num_tc(dev);
-		if (tcs > 1) {
-			bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
-			if (bp->tx_nr_rings_per_tc == 0) {
-				netdev_reset_tc(dev);
-				bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
-			} else {
-				int i, off, count;
-
-				bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
-				for (i = 0; i < tcs; i++) {
-					count = bp->tx_nr_rings_per_tc;
-					off = i * count;
-					netdev_set_tc_queue(dev, i, count, off);
-				}
-			}
-		}
-		bp->cp_nr_rings = total_vecs;
+		bp->cp_nr_rings = (min == 1) ?
+				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+				  bp->tx_nr_rings + bp->rx_nr_rings;
 
-		for (i = 0; i < bp->cp_nr_rings; i++) {
-			char *attr;
-
-			bp->irq_tbl[i].vector = msix_ent[i].vector;
-			if (bp->flags & BNXT_FLAG_SHARED_RINGS)
-				attr = "TxRx";
-			else if (i < bp->rx_nr_rings)
-				attr = "rx";
-			else
-				attr = "tx";
-
-			snprintf(bp->irq_tbl[i].name, len,
-				 "%s-%s-%d", dev->name, attr, i);
-			bp->irq_tbl[i].handler = bnxt_msix;
-		}
-		rc = bnxt_set_real_num_queues(bp);
-		if (rc)
-			goto msix_setup_exit;
 	} else {
 		rc = -ENOMEM;
 		goto msix_setup_exit;
@@ -4828,52 +4943,54 @@ static int bnxt_setup_msix(struct bnxt *bp)
 	return 0;
 
 msix_setup_exit:
-	netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+	kfree(bp->irq_tbl);
+	bp->irq_tbl = NULL;
 	pci_disable_msix(bp->pdev);
 	kfree(msix_ent);
 	return rc;
 }
 
-static int bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_inta(struct bnxt *bp)
 {
-	int rc;
-	const int len = sizeof(bp->irq_tbl[0].name);
-
-	if (netdev_get_num_tc(bp->dev))
-		netdev_reset_tc(bp->dev);
-
 	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
-	if (!bp->irq_tbl) {
-		rc = -ENOMEM;
-		return rc;
-	}
+	if (!bp->irq_tbl)
+		return -ENOMEM;
+
+	bp->total_irqs = 1;
 	bp->rx_nr_rings = 1;
 	bp->tx_nr_rings = 1;
 	bp->cp_nr_rings = 1;
 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	bp->irq_tbl[0].vector = bp->pdev->irq;
-	snprintf(bp->irq_tbl[0].name, len,
-		 "%s-%s-%d", bp->dev->name, "TxRx", 0);
-	bp->irq_tbl[0].handler = bnxt_inta;
-	rc = bnxt_set_real_num_queues(bp);
-	return rc;
+	return 0;
 }
 
-static int bnxt_setup_int_mode(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
 {
 	int rc = 0;
 
 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
-		rc = bnxt_setup_msix(bp);
+		rc = bnxt_init_msix(bp);
 
 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
 		/* fallback to INTA */
-		rc = bnxt_setup_inta(bp);
+		rc = bnxt_init_inta(bp);
 	}
 	return rc;
 }
 
+static void bnxt_clear_int_mode(struct bnxt *bp)
+{
+	if (bp->flags & BNXT_FLAG_USING_MSIX)
+		pci_disable_msix(bp->pdev);
+
+	kfree(bp->irq_tbl);
+	bp->irq_tbl = NULL;
+	bp->flags &= ~BNXT_FLAG_USING_MSIX;
+}
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -4892,10 +5009,6 @@ static void bnxt_free_irq(struct bnxt *bp)
 		free_irq(irq->vector, bp->bnapi[i]);
 		irq->requested = 0;
 	}
-	if (bp->flags & BNXT_FLAG_USING_MSIX)
-		pci_disable_msix(bp->pdev);
-	kfree(bp->irq_tbl);
-	bp->irq_tbl = NULL;
 }
 
 static int bnxt_request_irq(struct bnxt *bp)
@@ -5566,22 +5679,7 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 static int bnxt_open(struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
-	int rc = 0;
 
-	if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
-		rc = bnxt_hwrm_func_reset(bp);
-		if (rc) {
-			netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
-				   rc);
-			rc = -EBUSY;
-			return rc;
-		}
-		/* Do func_reset during the 1st PF open only to prevent killing
-		 * the VFs when the PF is brought down and up.
-		 */
-		if (BNXT_PF(bp))
-			set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
-	}
 	return __bnxt_open_nic(bp, true, true);
 }
 
@@ -6685,12 +6783,15 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	cancel_work_sync(&bp->sp_task);
 	bp->sp_event = 0;
 
+	bnxt_clear_int_mode(bp);
 	bnxt_hwrm_func_drv_unrgtr(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_dcb_free(bp);
 	pci_iounmap(pdev, bp->bar2);
 	pci_iounmap(pdev, bp->bar1);
 	pci_iounmap(pdev, bp->bar0);
+	kfree(bp->edev);
+	bp->edev = NULL;
 	free_netdev(dev);
 
 	pci_release_regions(pdev);
@@ -6799,6 +6900,39 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
+static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+			       bool shared)
+{
+	int rc;
+
+	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+	if (rc)
+		return rc;
+
+	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+		int max_cp, max_stat, max_irq;
+
+		/* Reserve minimum resources for RoCE */
+		max_cp = bnxt_get_max_func_cp_rings(bp);
+		max_stat = bnxt_get_max_func_stat_ctxs(bp);
+		max_irq = bnxt_get_max_func_irqs(bp);
+		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
+		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
+		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
+			return 0;
+
+		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
+		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
+		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
+		max_cp = min_t(int, max_cp, max_irq);
+		max_cp = min_t(int, max_cp, max_stat);
+		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
+		if (rc)
+			rc = 0;
+	}
+	return rc;
+}
+
 static int bnxt_set_dflt_rings(struct bnxt *bp)
 {
 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -6807,7 +6941,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
 	if (sh)
 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	dflt_rings = netif_get_num_default_rss_queues();
-	rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 	if (rc)
 		return rc;
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
@@ -6823,6 +6957,13 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
 	return rc;
 }
 
+void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+{
+	ASSERT_RTNL();
+	bnxt_hwrm_func_qcaps(bp);
+	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+}
+
 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 {
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -6928,6 +7069,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err;
 
+	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+	if (rc)
+		goto init_err;
+
+	bp->ulp_probe = bnxt_ulp_probe;
+
 	/* Get the MAX capabilities for this function */
 	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc) {
@@ -6949,12 +7096,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
-	if (BNXT_PF(bp))
-		bp->pf.max_irqs = max_irqs;
-#if defined(CONFIG_BNXT_SRIOV)
-	else
-		bp->vf.max_irqs = max_irqs;
-#endif
+	bnxt_set_max_func_irqs(bp, max_irqs);
 	bnxt_set_dflt_rings(bp);
 
 	/* Default RSS hash cfg. */
@@ -6985,10 +7127,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err;
 
-	rc = register_netdev(dev);
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc)
+		goto init_err;
+
+	rc = bnxt_init_int_mode(bp);
 	if (rc)
 		goto init_err;
 
+	rc = register_netdev(dev);
+	if (rc)
+		goto init_err_clr_int;
+
 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
 		    board_info[ent->driver_data].name,
 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
@@ -6997,6 +7147,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	return 0;
 
+init_err_clr_int:
+	bnxt_clear_int_mode(bp);
+
 init_err:
 	pci_iounmap(pdev, bp->bar0);
 	pci_release_regions(pdev);
@@ -7026,6 +7179,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 	rtnl_lock();
 	netif_device_detach(netdev);
 
+	bnxt_ulp_stop(bp);
+
 	if (state == pci_channel_io_perm_failure) {
 		rtnl_unlock();
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -7034,8 +7189,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(netdev))
 		bnxt_close(netdev);
 
-	/* So that func_reset will be done during slot_reset */
-	clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
 	pci_disable_device(pdev);
 	rtnl_unlock();
 
@@ -7069,11 +7222,14 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 	} else {
 		pci_set_master(pdev);
 
-		if (netif_running(netdev))
+		err = bnxt_hwrm_func_reset(bp);
+		if (!err && netif_running(netdev))
 			err = bnxt_open(netdev);
 
-		if (!err)
+		if (!err) {
 			result = PCI_ERS_RESULT_RECOVERED;
+			bnxt_ulp_start(bp);
+		}
 	}
 
 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))