Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c')
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 273 ++++++++++++++++--------
 1 file changed, 181 insertions(+), 92 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a22cf932ca35..58c537f16763 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -124,7 +124,7 @@ struct filter_entry {
/* Macros needed to support the PCI Device ID Table ...
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static struct pci_device_id cxgb4_pci_tbl[] = {
+ static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
{
int i;
- for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (q && q->handler) {
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
}
}
+/* Disable interrupt and napi handler */
+static void disable_interrupts(struct adapter *adap)
+{
+ if (adap->flags & FULL_INIT_DONE) {
+ t4_intr_disable(adap);
+ if (adap->flags & USING_MSIX) {
+ free_msix_queue_irqs(adap);
+ free_irq(adap->msix_info[0].vec, adap);
+ } else {
+ free_irq(adap->pdev->irq, adap);
+ }
+ quiesce_rx(adap);
+ }
+}
+
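
[Note: the same quiesce sequence is now shared by the shutdown, EEH error and device-removal paths; see the cxgb_down(), eeh_err_detected() and remove_one() hunks further down. A minimal caller sketch, where example_teardown() is a hypothetical wrapper rather than driver code:

/* Safe on every path: disable_interrupts() is a no-op until
 * FULL_INIT_DONE is set during cxgb_up(), so a device that never
 * finished bring-up is simply left alone.
 */
static void example_teardown(struct adapter *adap)
{
        disable_interrupts(adap);       /* mask HW intrs, free IRQs, quiesce NAPI */
        if (adap->flags & FULL_INIT_DONE)
                cxgb_down(adap);        /* flush work items, free SGE resources */
}
]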
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
{
int i;
- for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (!q)
@@ -957,6 +972,28 @@ static void enable_rx(struct adapter *adap)
}
}
+static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
+ unsigned int nq, unsigned int per_chan, int msi_idx,
+ u16 *ids)
+{
+ int i, err;
+
+ for (i = 0; i < nq; i++, q++) {
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+ adap->port[i / per_chan],
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
+ if (err)
+ return err;
+ memset(&q->stats, 0, sizeof(q->stats));
+ if (ids)
+ ids[i] = q->rspq.abs_id;
+ }
+ return 0;
+}
+
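
[Note: the adap->port[i / per_chan] index spreads the nq queues evenly across the ports. A standalone sketch of the arithmetic (names hypothetical, not in the patch):

/* With nq = 6 and per_chan = 3 on a 2-port adapter:
 *   i            : 0 1 2 3 4 5
 *   i / per_chan : 0 0 0 1 1 1  -> queues 0-2 on port 0, 3-5 on port 1
 */
static inline int port_for_queue(int i, int per_chan)
{
        return i / per_chan;
}
]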
/**
* setup_sge_queues - configure SGE Tx/Rx/response queues
* @adap: the adapter
@@ -970,8 +1007,8 @@ static int setup_sge_queues(struct adapter *adap)
int err, msi_idx, i, j;
struct sge *s = &adap->sge;
- bitmap_zero(s->starving_fl, MAX_EGRQ);
- bitmap_zero(s->txq_maperr, MAX_EGRQ);
+ bitmap_zero(s->starving_fl, s->egr_sz);
+ bitmap_zero(s->txq_maperr, s->egr_sz);
if (adap->flags & USING_MSIX)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
@@ -983,6 +1020,19 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = -((int)s->intrq.abs_id + 1);
}
+ /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+ * don't forget to update the following which need to be
+ * synchronized with any changes here.
+ *
+ * 1. The calculations of MAX_INGQ in cxgb4.h.
+ *
+ * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+ * to accommodate any new/deleted Ingress Queues
+ * which need MSI-X Vectors.
+ *
+ * 3. Update sge_qinfo_show() to include information on the
+ * new/deleted queues.
+ */
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
msi_idx, NULL, fwevtq_handler);
if (err) {
@@ -1018,51 +1068,27 @@ freeout: t4_free_sge_resources(adap);
j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
for_each_ofldrxq(s, i) {
- struct sge_ofld_rxq *q = &s->ofldrxq[i];
- struct net_device *dev = adap->port[i / j];
-
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
- q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- goto freeout;
- memset(&q->stats, 0, sizeof(q->stats));
- s->ofld_rxq[i] = q->rspq.abs_id;
- err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+ err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
+ adap->port[i / j],
s->fw_evtq.cntxt_id);
if (err)
goto freeout;
}
- for_each_rdmarxq(s, i) {
- struct sge_ofld_rxq *q = &s->rdmarxq[i];
-
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- goto freeout;
- memset(&q->stats, 0, sizeof(q->stats));
- s->rdma_rxq[i] = q->rspq.abs_id;
- }
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
+ err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+ if (err) \
+ goto freeout; \
+ if (msi_idx > 0) \
+ msi_idx += nq; \
+} while (0)
- for_each_rdmaciq(s, i) {
- struct sge_ofld_rxq *q = &s->rdmaciq[i];
+ ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+ ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+ j = s->rdmaciqs / adap->params.nports; /* rdmaciqs per channel */
+ ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- goto freeout;
- memset(&q->stats, 0, sizeof(q->stats));
- s->rdma_ciq[i] = q->rspq.abs_id;
- }
+#undef ALLOC_OFLD_RXQS
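
[Note the vector bookkeeping: alloc_ofld_rxqs() receives msi_idx by value and pre-increments its own local copy per queue, so on success the macro must advance the caller's msi_idx by nq. A worked example with illustrative numbers:

/* Entry: msi_idx = 4, nq = 3.
 * Inside alloc_ofld_rxqs(), the local copy hands out vectors 5, 6, 7.
 * The macro then does msi_idx += nq, leaving msi_idx = 7, so the next
 * group's first queue pre-increments to 8 -- nothing reused or skipped.
 */
]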
for_each_port(adap, i) {
/*
@@ -1273,6 +1299,10 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = 0;
} else {
txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ if (skb->protocol == htons(ETH_P_FCOE))
+ txq = skb->priority & 0x7;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
}
return txq;
}
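
[Note: for FCoE frames the Tx queue now comes from skb->priority (one of eight traffic classes) instead of the VLAN PCP bits. A condensed sketch of the selection above, assuming a hypothetical example_pick_txq() helper:

static u16 example_pick_txq(const struct sk_buff *skb, u16 vlan_tci)
{
        u16 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

#ifdef CONFIG_CHELSIO_T4_FCOE
        if (skb->protocol == htons(ETH_P_FCOE))
                txq = skb->priority & 0x7;      /* 8 traffic classes */
#endif
        return txq;
}
]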
@@ -4244,19 +4274,12 @@ static int cxgb_up(struct adapter *adap)
static void cxgb_down(struct adapter *adapter)
{
- t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
adapter->tid_release_task_busy = false;
adapter->tid_release_head = NULL;
- if (adapter->flags & USING_MSIX) {
- free_msix_queue_irqs(adapter);
- free_irq(adapter->msix_info[0].vec, adapter);
- } else
- free_irq(adapter->pdev->irq, adapter);
- quiesce_rx(adapter);
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
adapter->flags &= ~FULL_INIT_DONE;
@@ -4580,6 +4603,10 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ .ndo_fcoe_enable = cxgb_fcoe_enable,
+ .ndo_fcoe_disable = cxgb_fcoe_disable,
+#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = cxgb_busy_poll,
#endif
@@ -4733,8 +4760,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
- 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+ FW_CMD_CAP_PF);
if (ret < 0)
return ret;
@@ -5088,10 +5116,15 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- struct fw_devlog_cmd devlog_cmd;
- u32 devlog_meminfo;
int reset = 1;
+ /* Grab Firmware Device Log parameters as early as possible so we have
+ * access to it for debugging, etc.
+ */
+ ret = t4_init_devlog_params(adap);
+ if (ret < 0)
+ return ret;
+
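
[Note: t4_init_devlog_params() itself is added elsewhere in this series (in t4_hw.c). A plausible shape, reconstructed from the mailbox sequence this patch removes further down, would be:

/* Sketch only -- mirrors the removed inline code; the real helper may
 * additionally cope with a crashed or absent firmware.
 */
static int t4_init_devlog_params_sketch(struct adapter *adap)
{
        struct fw_devlog_cmd devlog_cmd;
        u32 meminfo;
        int ret;

        memset(&devlog_cmd, 0, sizeof(devlog_cmd));
        devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
                                       FW_CMD_REQUEST_F | FW_CMD_READ_F);
        devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
        ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
                         &devlog_cmd);
        if (ret)
                return ret;

        meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
        adap->params.devlog.memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(meminfo);
        adap->params.devlog.start =
                FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(meminfo) << 4;
        adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
        return 0;
}
]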
/* Contact FW, advertising Master capability */
ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
if (ret < 0) {
@@ -5169,30 +5202,6 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
- /* Read firmware device log parameters. We really need to find a way
- * to get these parameters initialized with some default values (which
- * are likely to be correct) for the case where we either don't
- * attache to the firmware or it's crashed when we probe the adapter.
- * That way we'll still be able to perform early firmware startup
- * debugging ... If the request to get the Firmware's Device Log
- * parameters fails, we'll live so we don't make that a fatal error.
- */
- memset(&devlog_cmd, 0, sizeof(devlog_cmd));
- devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
- ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
- &devlog_cmd);
- if (ret == 0) {
- devlog_meminfo =
- ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
- adap->params.devlog.memtype =
- FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
- adap->params.devlog.start =
- FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
- adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
- }
-
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -5293,6 +5302,51 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
+ /* qids (ingress/egress) returned from firmware can be anywhere
+ * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+ * Hence the driver needs to allocate memory for this range to
+ * store the queue info. Get the highest IQFLINT/EQ index returned
+ * in FW_EQ_*_CMD.alloc command.
+ */
+ params[0] = FW_PARAM_PFVF(EQ_END);
+ params[1] = FW_PARAM_PFVF(IQFLINT_END);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+ adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+ adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+ sizeof(*adap->sge.egr_map), GFP_KERNEL);
+ if (!adap->sge.egr_map) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+ sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+ if (!adap->sge.ingr_map) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ /* Allocate the memory for the various egress queue bitmaps,
+ * i.e. starving_fl and txq_maperr.
+ */
+ adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.starving_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.txq_maperr) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
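
[Note: BITS_TO_LONGS() rounds a bit count up to whole longs, so these bitmaps stay small even for large queue ranges. A sizing sketch with an assumed queue count:

/* On a 64-bit kernel, an egr_sz of, say, 6656 queue IDs needs
 * BITS_TO_LONGS(6656) = 104 longs = 832 bytes per bitmap -- allocated
 * here on demand instead of a fixed MAX_EGRQ-sized array in struct sge.
 */
size_t bitmap_bytes = BITS_TO_LONGS(adap->sge.egr_sz) * sizeof(long);
]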
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5368,7 +5422,7 @@ static int adap_init0(struct adapter *adap)
adap->tids.stid_base = val[1];
adap->tids.nstids = val[2] - val[1] + 1;
/*
- * Setup server filter region. Divide the availble filter
+ * Setup server filter region. Divide the available filter
* region into two parts. Regular filters get 1/3rd and server
* filters get 2/3rd part. This is only enabled if workaround
* path is enabled.
@@ -5501,6 +5555,10 @@ static int adap_init0(struct adapter *adap)
* happened to HW/FW, stop issuing commands.
*/
bye:
+ kfree(adap->sge.egr_map);
+ kfree(adap->sge.ingr_map);
+ kfree(adap->sge.starving_fl);
+ kfree(adap->sge.txq_maperr);
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -5528,6 +5586,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
netif_carrier_off(dev);
}
spin_unlock(&adap->stats_lock);
+ disable_interrupts(adap);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
@@ -5705,7 +5764,16 @@ static void cfg_queues(struct adapter *adap)
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
- s->rdmaciqs = adap->params.nports;
+ /* Try to allow at least 1 CIQ per cpu, rounding down
+ * to the number of ports, with a minimum of 1 per port.
+ * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
+ * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
+ * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
+ */
+ s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
+ s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
+ adap->params.nports;
+ s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
}
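
[Note: the three steps above -- clamp to the CPU count, round down to a multiple of the port count, floor at one per port -- can be checked with a standalone sketch (names illustrative):

static int example_rdmaciqs(int ncpus, int nports, int max_ciqs)
{
        int n = min(max_ciqs, ncpus);

        n = (n / nports) * nports;      /* round down to a port multiple */
        return max(n, nports);          /* but never fewer than 1 per port */
}
/* example_rdmaciqs(6, 2, 32) == 6; (6, 4, 32) == 4; (2, 4, 32) == 4,
 * matching the three cases in the comment above.
 */
]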
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5791,12 +5859,17 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, want, need;
+ int i, want, need, allocated;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
- struct msix_entry entries[MAX_INGQ + 1];
+ struct msix_entry *entries;
+
+ entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ for (i = 0; i < MAX_INGQ + 1; ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
@@ -5813,29 +5886,39 @@ static int enable_msix(struct adapter *adap)
#else
need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
- want = pci_enable_msix_range(adap->pdev, entries, need, want);
- if (want < 0)
- return want;
+ allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (allocated < 0) {
+ dev_info(adap->pdev_dev,
+ "not enough MSI-X vectors left, not using MSI-X\n");
+ kfree(entries);
+ return allocated;
+ }
- /*
- * Distribute available vectors to the various queue groups.
+ /* Distribute available vectors to the various queue groups.
* Every group gets its minimum requirement and NIC gets top
* priority for leftovers.
*/
- i = want - EXTRA_VECS - ofld_need;
+ i = allocated - EXTRA_VECS - ofld_need;
if (i < s->max_ethqsets) {
s->max_ethqsets = i;
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
+ if (allocated < want) {
+ s->rdmaqs = nchan;
+ s->rdmaciqs = nchan;
+ }
+
+ /* leftovers go to OFLD */
+ i = allocated - EXTRA_VECS - s->max_ethqsets -
+ s->rdmaqs - s->rdmaciqs;
s->ofldqsets = (i / nchan) * nchan; /* round down */
}
- for (i = 0; i < want; ++i)
+ for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
+ kfree(entries);
return 0;
}
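
[Note: pci_enable_msix_range() returns the number of vectors actually granted, anywhere in the [need, want] range, or a negative errno if even 'need' cannot be met -- hence the rename of the result from 'want' to 'allocated'. The general pattern, with shrink_queue_plan() a hypothetical stand-in for the per-group resizing above:

static int example_alloc_vectors(struct pci_dev *pdev,
                                 struct msix_entry *entries,
                                 int need, int want)
{
        int allocated = pci_enable_msix_range(pdev, entries, need, want);

        if (allocated < 0)
                return allocated;       /* not even 'need' was available */
        if (allocated < want)
                shrink_queue_plan(allocated);   /* hypothetical helper */
        return allocated;
}
]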
@@ -5912,6 +5995,10 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
+ kfree(adapter->sge.egr_map);
+ kfree(adapter->sge.ingr_map);
+ kfree(adapter->sge.starving_fl);
+ kfree(adapter->sge.txq_maperr);
disable_msi(adapter);
for_each_port(adapter, i)
@@ -6237,6 +6324,8 @@ static void remove_one(struct pci_dev *pdev)
if (is_offload(adapter))
detach_ulds(adapter);
+ disable_interrupts(adapter);
+
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);