Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c')
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 232 ++++++++++++----------
 1 file changed, 132 insertions(+), 100 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d147610a06f..b8a5fb0c32d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -162,19 +162,8 @@ MODULE_FIRMWARE(FW6_FNAME);
static uint force_init;
module_param(force_init, uint, 0644);
-MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
-
-/*
- * Normally if the firmware we connect to has Configuration File support, we
- * use that and only fall back to the old Driver-based initialization if the
- * Configuration File fails for some reason. If force_old_init is set, then
- * we'll always use the old Driver-based initialization sequence.
- */
-static uint force_old_init;
-
-module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
- " parameter");
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
+		 "deprecated parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
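The hunk above folds the old force_old_init behaviour away and keeps force_init only as a deprecated parameter, so existing scripts that pass it still load. A driver can additionally warn when a deprecated parameter is actually used; a minimal sketch of that pattern (hypothetical, not part of this commit -- the module-init path shown here is only assumed):

	static int __init cxgb4_init_module(void)
	{
		if (force_init)
			pr_warn("cxgb4: force_init is deprecated and has no effect\n");
		return pci_register_driver(&cxgb4_driver);
	}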
@@ -196,23 +185,6 @@ module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
- * Queue interrupt hold-off timer values. Queues default to the first of these
- * upon creation.
- */
-static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
-
-module_param_array(intr_holdoff, uint, NULL, 0644);
-MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
- "0..4 in microseconds, deprecated parameter");
-
-static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
-
-module_param_array(intr_cnt, uint, NULL, 0644);
-MODULE_PARM_DESC(intr_cnt,
- "thresholds 1..3 for queue interrupt packet counters, "
- "deprecated parameter");
-
-/*
* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
* offset by 2 bytes in order to have the IP headers line up on 4-byte
* boundaries. This is a requirement for many architectures which will throw
@@ -226,13 +198,7 @@ MODULE_PARM_DESC(intr_cnt,
*/
static int rx_dma_offset = 2;
-static bool vf_acls;
-
#ifdef CONFIG_PCI_IOV
-module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
- "deprecated parameter");
-
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
* on SR-IOV Capable Physical Functions.
*/
@@ -253,12 +219,6 @@ module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
"Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
-static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
-
-module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
- "deprecated parameter");
-
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -766,8 +726,8 @@ static void name_msix_vecs(struct adapter *adap)
}
/* offload queues */
- for_each_ofldrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ for_each_iscsirxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
adap->port[0]->name, i);
for_each_rdmarxq(&adap->sge, i)
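From here on the patch renames every for_each_ofldrxq() walker to for_each_iscsirxq(). The iterator itself lives in cxgb4.h and is not shown in this diff; by analogy with its for_each_*rxq siblings it presumably expands to a plain loop over the renamed queue count (a sketch under that assumption):

	#define for_each_iscsirxq(sge, i) \
		for (i = 0; i < (sge)->iscsiqsets; i++)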
@@ -782,7 +742,7 @@ static void name_msix_vecs(struct adapter *adap)
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -799,11 +759,11 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_ofldrxq(s, ofldqidx) {
+ for_each_iscsirxq(s, iscsiqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
adap->msix_info[msi_index].desc,
- &s->ofldrxq[ofldqidx].rspq);
+ &s->iscsirxq[iscsiqidx].rspq);
if (err)
goto unwind;
msi_index++;
@@ -835,9 +795,9 @@ unwind:
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
- while (--ofldqidx >= 0)
+ while (--iscsiqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
- &s->ofldrxq[ofldqidx].rspq);
+ &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -853,8 +813,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_ofldrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
+ for_each_iscsirxq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec,
+ &s->iscsirxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
for_each_rdmaciq(s, i)
@@ -1093,8 +1054,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
- for_each_ofldrxq(s, i) {
+ j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
+ for_each_iscsirxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1110,7 +1071,7 @@ freeout: t4_free_sge_resources(adap);
msi_idx += nq; \
} while (0)
- ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+ ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
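Only the tail of the ALLOC_OFLD_RXQS() helper is visible in the hunk above. Reconstructed from that tail and the way it is invoked, the whole macro plausibly reads as follows (a sketch, not quoted from the tree):

	#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
		err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
		if (err) \
			goto freeout; \
		if (msi_idx > 0) \
			msi_idx += nq; \
	} while (0)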
@@ -1181,16 +1142,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
*/
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
if (f->l2t == NULL) {
kfree_skb(skb);
- return -EAGAIN;
- }
- if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
- f->fs.eport, f->fs.dmac)) {
- cxgb4_l2t_release(f->l2t);
- f->l2t = NULL;
- kfree_skb(skb);
return -ENOMEM;
}
}
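set_filter_wr() now obtains a fully programmed switching L2T entry in a single call: t4_l2t_alloc_switching() absorbs the old allocate-then-t4_l2t_set_switching() sequence, which is why the partially-initialized-entry error path disappears above. The prototype implied by the new call site (inferred; declared in l2t.h):

	struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
						 u8 port, u8 *dmac);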
@@ -1511,7 +1466,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
else
stid = -1;
} else {
- stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+ stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
if (stid < 0)
stid = -1;
}
@@ -1525,7 +1480,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (family == PF_INET)
t->stids_in_use++;
else
- t->stids_in_use += 4;
+ t->stids_in_use += 2;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1576,13 +1531,13 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
- bitmap_release_region(t->stid_bmap, stid, 2);
+ bitmap_release_region(t->stid_bmap, stid, 1);
t->stid_tab[stid].data = NULL;
if (stid < t->nstids) {
if (family == PF_INET)
t->stids_in_use--;
else
- t->stids_in_use -= 4;
+ t->stids_in_use -= 2;
} else {
t->sftids_in_use--;
}
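The third argument of bitmap_find_free_region()/bitmap_release_region() is an allocation order: 2^order contiguous bits are reserved or freed. Dropping it from 2 to 1 shrinks an IPv6 server TID from four consecutive stids to two, which is why the stids_in_use accounting in both hunks moves from 4 to 2 in lock-step. A minimal illustration of the order semantics:

	DECLARE_BITMAP(stid_bmap, 64);	/* from <linux/bitmap.h> */
	int stid;

	stid = bitmap_find_free_region(stid_bmap, 64, 1); /* 2^1 = 2 slots */
	if (stid >= 0)
		bitmap_release_region(stid_bmap, stid, 1); /* same order to free */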
@@ -2283,7 +2238,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2295,7 +2250,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2365,7 +2320,7 @@ static void recover_all_queues(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
+ for_each_iscsirxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2449,10 +2404,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nrxq = adap->sge.rdmaqs;
lli.nciq = adap->sge.rdmaciqs;
} else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.ofld_rxq;
- lli.nrxq = adap->sge.ofldqsets;
+ lli.rxq_ids = adap->sge.iscsi_rxq;
+ lli.nrxq = adap->sge.iscsiqsets;
}
- lli.ntxq = adap->sge.ofldqsets;
+ lli.ntxq = adap->sge.iscsiqsets;
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
@@ -3146,16 +3101,6 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
- /* select capabilities we'll be using */
- if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adap->pdev_dev, "virtualization ACLs not supported");
- return ret;
- }
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
@@ -4348,11 +4293,11 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+ i = min_t(int, ARRAY_SIZE(s->iscsirxq),
num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
+ s->iscsiqsets = roundup(i, adap->params.nports);
} else
- s->ofldqsets = adap->params.nports;
+ s->iscsiqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
/* Try and allow at least 1 CIQ per cpu rounding down
@@ -4383,8 +4328,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
- struct sge_ofld_rxq *r = &s->ofldrxq[i];
+ for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
+ struct sge_ofld_rxq *r = &s->iscsirxq[i];
init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
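The 5 and 1 passed to init_rspq() are the per-queue defaults that stand in for the removed intr_holdoff/intr_cnt module parameters; reading the helper's other call sites, the arguments appear to be hold-off time, packet-count threshold, queue size and entry size (an interpretation of the call, not quoted documentation):

	init_rspq(adap, &r->rspq,
		  5,		/* interrupt hold-off, in microseconds */
		  1,		/* packet-count threshold */
		  1024,		/* response queue size, in entries */
		  64);		/* response queue entry size, in bytes */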
@@ -4465,7 +4410,7 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
/* need nchan for each possible ULD */
ofld_need = 3 * nchan;
}
@@ -4504,13 +4449,13 @@ static int enable_msix(struct adapter *adap)
/* leftovers go to OFLD */
i = allocated - EXTRA_VECS - s->max_ethqsets -
s->rdmaqs - s->rdmaciqs;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
+ s->iscsiqsets = (i / nchan) * nchan; /* round down */
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
"nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
- allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+ allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
s->rdmaciqs);
kfree(entries);
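The vector budgeting above is easiest to follow with concrete numbers; a worked example with invented counts:

	/*
	 * Hypothetical 2-port adapter: max_ethqsets = 8, rdmaqs = 2,
	 * rdmaciqs = 2, iscsiqsets = 8, EXTRA_VECS = 2, nchan = 2.
	 *
	 *	want = 8 + 2 + (2 + 2 + 8) = 22 vectors
	 *
	 * If the PCI core grants only 18, the shortfall comes out of the
	 * iSCSI queues:
	 *
	 *	i = 18 - 2 - 8 - 2 - 2 = 4
	 *	iscsiqsets = (4 / 2) * 2 = 4	(rounded down to a multiple of nchan)
	 */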
@@ -4538,6 +4483,79 @@ static int init_rss(struct adapter *adap)
return 0;
}
+static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
+ &lnkcap1);
+ err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
+ &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
+ return err1 ? err1 : err2 ? err2 : -EINVAL;
+ return 0;
+}
+
+static void cxgb4_check_pcie_caps(struct adapter *adap)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
+ dev_warn(adap->pdev_dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ dev_warn(adap->pdev_dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+ if (speed < speed_cap || width < width_cap)
+ dev_info(adap->pdev_dev,
+ "A slot with more lanes and/or higher speed is "
+ "suggested for optimal performance.\n");
+}
+
static void print_port_info(const struct net_device *dev)
{
char buf[80];
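To see the capability decode in action, consider illustrative register values (invented for the example; the bit masks are the standard ones from include/uapi/linux/pci_regs.h):

	u32 lnkcap1 = 0x00000083;	/* MLW field = 8 lanes, SLS = 8.0GT/s */
	u32 lnkcap2 = 0x0000000e;	/* SLS vector: 2.5, 5.0 and 8.0 GT/s */

	/* lnkcap2 is non-zero, so the PCIe r3.0 branch wins:
	 *   lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB	-> *speed = PCIE_SPEED_8_0GT
	 *   (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> 4	-> *width = 8
	 */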
@@ -4565,10 +4583,10 @@ static void print_port_info(const struct net_device *dev)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
adap->params.vpd.id,
CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ is_offload(adap) ? "R" : "",
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
netdev_info(dev, "S/N: %s, P/N: %s\n",
@@ -4787,8 +4805,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure SGE_STAT_CFG_A to read WC stats */
if (!is_t4(adapter->params.chip))
- t4_write_reg(adapter, SGE_STAT_CFG_A,
- STATSOURCE_T5_V(7) | STATMODE_V(0));
+ t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
+ (is_t5(adapter->params.chip) ? STATMODE_V(0) :
+ T6_STATMODE_V(0)));
for_each_port(adapter, i) {
struct net_device *netdev;
@@ -4865,15 +4884,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#if IS_ENABLED(CONFIG_IPV6)
- adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
- adapter->clipt_end);
- if (!adapter->clipt) {
- /* We tolerate a lack of clip_table, giving up
- * some functionality
+ if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
+ (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
+ /* CLIP functionality is not present in hardware,
+ * hence disable all offload features
*/
dev_warn(&pdev->dev,
- "could not allocate Clip table, continuing\n");
+ "CLIP not enabled in hardware, continuing\n");
adapter->params.offload = 0;
+ } else {
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up
+ * some functionality
+ */
+ dev_warn(&pdev->dev,
+ "could not allocate Clip table, continuing\n");
+ adapter->params.offload = 0;
+ }
}
#endif
if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
@@ -4904,6 +4933,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
+ /* check for PCI Express bandwidth capabilities */
+ cxgb4_check_pcie_caps(adapter);
+
err = init_rss(adapter);
if (err)
goto out_free_dev;