 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 223 +++++++++++++++++++++++++++---------
 1 file changed, 181 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 5d402bace6c1..fc04e3b878e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -82,6 +82,24 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
spin_unlock_irqrestore(&bmap->lock, flags);
}
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ struct adapter *adap = q->adap;
+
+ if (adap->uld[q->uld].lro_flush)
+ adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD, we just maintain statistics.
+ */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
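
For orientation, the hunk above shows only uldrx_handler()'s new kernel-doc and signature. A condensed sketch of the delivery path it documents, inferred from this file's other uses of the uld callback table (the lro_rx_handler/rx_handler callbacks and the per-queue stats counters); not the verbatim body:

	static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
				 const struct pkt_gl *gl)
	{
		struct adapter *adap = q->adap;
		struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq,
							rspq);
		int ret;

		/* Pick the LRO-aware callback when the queue was allocated
		 * with a flush handler (see alloc_uld_rxqs() below). */
		if (q->flush_handler)
			ret = adap->uld[q->uld].lro_rx_handler(
					adap->uld[q->uld].handle, rsp, gl,
					&q->lro_mgr, &q->napi);
		else
			ret = adap->uld[q->uld].rx_handler(
					adap->uld[q->uld].handle, rsp, gl);

		if (ret) {
			rxq->stats.nomem++;
			return -1;
		}

		/* All real processing happens in the ULD; just count it. */
		if (!gl)
			rxq->stats.imm++;
		else
			rxq->stats.pkts++;
		return 0;
	}
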
@@ -124,8 +142,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
unsigned short *ids = rxq_info->rspq_id + offset;
unsigned int per_chan = nq / adap->params.nports;
- unsigned int msi_idx, bmap_idx;
- int i, err;
+ unsigned int bmap_idx = 0;
+ int i, err, msi_idx;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -135,14 +153,14 @@ static int alloc_uld_rxqs(struct adapter *adap,
for (i = 0; i < nq; i++, q++) {
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
- adap->msi_idx++;
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
- adap->msi_idx,
+ msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
- NULL,
+ lro ? uldrx_flush_handler : NULL,
0);
if (err)
goto freeout;
@@ -159,7 +177,6 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
/* We need to free rxq also in case of ciq allocation failure */
@@ -169,7 +186,6 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
}
return err;
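
For reference, a sketch of the allocator that pairs with the free_msix_idx_in_bmap() helper shown in the first hunk; the bitmap field names are inferred from that helper and may differ in detail:

	static int get_msix_idx_from_bmap(struct adapter *adap)
	{
		struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
		unsigned int msix_idx;
		unsigned long flags;

		spin_lock_irqsave(&bmap->lock, flags);
		msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
		if (msix_idx < bmap->mapsize)
			__set_bit(msix_idx, bmap->msix_bmap);
		spin_unlock_irqrestore(&bmap->lock, flags);

		return msix_idx < bmap->mapsize ? msix_idx : -ENOSPC;
	}

The returned bitmap slot indexes adap->msix_info_ulds[], which is where the allocation loop above now reads its real MSI-X vector index instead of bumping the shared adap->msi_idx counter the old code relied on.
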
@@ -178,17 +194,38 @@ freeout:
int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int i, ret = 0;
if (adap->flags & USING_MSIX) {
- rxq_info->msix_tbl = kzalloc(rxq_info->nrxq + rxq_info->nciq,
+ rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+ sizeof(unsigned short),
GFP_KERNEL);
if (!rxq_info->msix_tbl)
return -ENOMEM;
}
- return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
rxq_info->nrxq, lro));
+
+ /* Tell uP to route control queue completions to rdma rspq */
+ if (adap->flags & FULL_INIT_DONE &&
+ !ret && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ unsigned int cmplqid;
+ u32 param, cmdop;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+ return ret;
}
static void t4_free_uld_rxqs(struct adapter *adap, int n,
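
Condensing the firmware handshake just added to setup_sge_queues_uld(): one FW_PARAMS_MNEM_DMAQ parameter per port binds a control queue's egress context to the RDMA rspq, and the matching teardown in free_sge_queues_uld() below repeats the call with cmplqid = 0 to unroute it. Schematically, with ctrlq_ctxt_id and rdma_rspq_ctxt_id standing in for the context ids read out of adap->sge:

	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
		    FW_PARAMS_PARAM_YZ_V(ctrlq_ctxt_id);
	u32 cmplqid = rdma_rspq_ctxt_id;	/* 0 on teardown to unroute */

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &cmplqid);
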
@@ -198,7 +235,6 @@ static void t4_free_uld_rxqs(struct adapter *adap, int n,
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
}
@@ -206,6 +242,21 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ u32 param, cmdop, cmplqid = 0;
+ int i;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+
if (rxq_info->nciq)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
@@ -215,26 +266,38 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
}
int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
- const struct cxgb4_pci_uld_info *uld_info)
+ const struct cxgb4_uld_info *uld_info)
{
struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info;
- int i, nrxq;
+ int i, nrxq, ciq_size;
rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
if (!rxq_info)
return -ENOMEM;
- if (uld_info->nrxq > s->nqs_per_uld)
- rxq_info->nrxq = s->nqs_per_uld;
- else
- rxq_info->nrxq = uld_info->nrxq;
- if (!uld_info->nciq)
+ if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ i = s->nqs_per_uld;
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ } else {
+ i = min_t(int, uld_info->nrxq,
+ num_online_cpus());
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ }
+ if (!uld_info->ciq) {
rxq_info->nciq = 0;
- else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld)
- rxq_info->nciq = s->nqs_per_uld;
- else
- rxq_info->nciq = uld_info->nciq;
+ } else {
+ if (adap->flags & USING_MSIX)
+ rxq_info->nciq = min_t(int, s->nqs_per_uld,
+ num_online_cpus());
+ else
+ rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+ num_online_cpus());
+ rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+ adap->params.nports);
+ rxq_info->nciq = max_t(int, rxq_info->nciq,
+ adap->params.nports);
+ }
nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
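
To make the queue-count rounding concrete, here is a standalone sketch of the min()/roundup() path above, with illustrative values that are not taken from the patch:

	#include <stdio.h>

	#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		/* assumed: 4-port adapter, ULD asks for 10 rxqs, 6 CPUs online */
		int nports = 4, uld_nrxq = 10, online_cpus = 6;
		int i = uld_nrxq < online_cpus ? uld_nrxq : online_cpus;
		int nrxq = roundup(i, nports);	/* 6 rounded up to 8 */

		printf("nrxq = %d (%d per port)\n", nrxq, nrxq / nports);
		return 0;
	}

Every port thus ends up with the same number of offload rx queues, which is what the roundup() to adap->params.nports guarantees.
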
@@ -259,12 +322,17 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
r->fl.size = 72;
}
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
for (i = rxq_info->nrxq; i < nrxq; i++) {
struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
- init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
r->rspq.uld = uld_type;
- r->fl.size = 72;
}
memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
@@ -285,7 +353,8 @@ void free_queues_uld(struct adapter *adap, unsigned int uld_type)
int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx, bmap_idx, err = 0;
+ int err = 0;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
@@ -310,10 +379,10 @@ unwind:
void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+ bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -325,10 +394,10 @@ void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int n = sizeof(adap->msix_info_ulds[0].desc);
- int idx;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+ bmap_idx = rxq_info->msix_tbl[idx];
snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
adap->port[0]->name, rxq_info->name, idx);
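
The snprintf() above produces one description per vector of the form <port0 name>-<uld name><queue index>. Assuming a first port named eth0 and the RDMA ULD, the resulting hypothetical interrupt names would read:

	eth0-rdma0
	eth0-rdma1
	eth0-rdma2

and so on for each queue index.
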
@@ -390,15 +459,15 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
lli->nciq = rxq_info->nciq;
}
-int uld_mem_alloc(struct adapter *adap)
+int t4_uld_mem_alloc(struct adapter *adap)
{
struct sge *s = &adap->sge;
- adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
+ adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
if (!adap->uld)
return -ENOMEM;
- s->uld_rxq_info = kzalloc(adap->num_uld *
+ s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
sizeof(struct sge_uld_rxq_info *),
GFP_KERNEL);
if (!s->uld_rxq_info)
@@ -410,7 +479,7 @@ err_uld:
return -ENOMEM;
}
-void uld_mem_free(struct adapter *adap)
+void t4_uld_mem_free(struct adapter *adap)
{
struct sge *s = &adap->sge;
@@ -418,6 +487,26 @@ void uld_mem_free(struct adapter *adap)
kfree(adap->uld);
}
+void t4_uld_clean_up(struct adapter *adap)
+{
+ struct sge_uld_rxq_info *rxq_info;
+ unsigned int i;
+
+ if (!adap->uld)
+ return;
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ if (!adap->uld[i].handle)
+ continue;
+ rxq_info = adap->sge.uld_rxq_info[i];
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, i);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, i);
+ free_sge_queues_uld(adap, i);
+ free_queues_uld(adap, i);
+ }
+}
+
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
int i;
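
t4_uld_clean_up() gives the core driver a single teardown entry point for all registered ULDs. A hypothetical caller from the PCI remove path (not part of this file) might look like:

	static void remove_one(struct pci_dev *pdev)
	{
		struct adapter *adap = pci_get_drvdata(pdev);

		detach_ulds(adap);	/* let each ULD drop its handle */
		t4_uld_clean_up(adap);	/* quiesce rx, free irqs and queues */
		t4_uld_mem_free(adap);	/* free adap->uld and uld_rxq_info */
	}
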
@@ -429,10 +518,15 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.iscsiqsets;
+ lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lld->iscsi_ppm = &adap->iscsi_ppm;
lld->adapter_type = adap->params.chip;
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
@@ -472,23 +566,37 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
}
adap->uld[uld].handle = handle;
+ t4_register_netevent_notifier();
if (adap->flags & FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
- struct cxgb4_pci_uld_info *p)
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
{
int ret = 0;
+ unsigned int adap_idx = 0;
struct adapter *adap;
- if (type >= CXGB4_PCI_ULD_MAX)
+ if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
- if (!is_pci_uld(adap))
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
ret = cfg_queues_uld(adap, type, p);
if (ret)
@@ -510,11 +618,14 @@ int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
}
adap->uld[type] = *p;
uld_attach(adap, type);
+ adap_idx++;
}
mutex_unlock(&uld_mutex);
return 0;
free_irq:
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
@@ -522,21 +633,49 @@ free_rxq:
free_queues:
free_queues_uld(adap, type);
out:
+
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ if (!adap_idx)
+ break;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ adap_idx--;
+ }
mutex_unlock(&uld_mutex);
return ret;
}
-EXPORT_SYMBOL(cxgb4_register_pci_uld);
+EXPORT_SYMBOL(cxgb4_register_uld);
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
{
struct adapter *adap;
- if (type >= CXGB4_PCI_ULD_MAX)
+ if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
- if (!is_pci_uld(adap))
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
@@ -551,4 +690,4 @@ int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
return 0;
}
-EXPORT_SYMBOL(cxgb4_unregister_pci_uld);
+EXPORT_SYMBOL(cxgb4_unregister_uld);
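
Finally, a minimal sketch of a consumer of the renamed API. The foo_* callbacks and field values are purely illustrative; only .name, .nrxq, .rxq_size, .add, .rx_handler and .state_change are assumed from how struct cxgb4_uld_info is used in this file:

	static void *foo_add(const struct cxgb4_lld_info *lld)
	{
		/* allocate per-adapter ULD state from the lower-level info */
		return foo_alloc_dev(lld);	/* hypothetical helper */
	}

	static const struct cxgb4_uld_info foo_uld_info = {
		.name		= "foo",
		.nrxq		= 8,	/* a request; cfg_queues_uld() may trim */
		.rxq_size	= 1024,
		.add		= foo_add,
		.rx_handler	= foo_rx_handler,	/* hypothetical */
		.state_change	= foo_state_change,	/* hypothetical */
	};

	static int __init foo_init(void)
	{
		/* returns -EBUSY if a ULD of this type is already registered */
		return cxgb4_register_uld(CXGB4_ULD_ISCSI, &foo_uld_info);
	}

	static void __exit foo_exit(void)
	{
		cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	}
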