Diffstat (limited to 'drivers/net/cxgb3/sge.c')
 drivers/net/cxgb3/sge.c | 124 ++++++++++++++++++++++-----------------
 1 file changed, 73 insertions(+), 51 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index c15e43a8543b..cb684d30831f 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -91,6 +91,10 @@ struct rx_desc {
struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
+ u8 eop; /* set if last descriptor for packet */
+ u8 addr_idx; /* buffer index of first SGL entry in descriptor */
+ u8 fragidx; /* first page fragment associated with descriptor */
+ s8 sflit; /* start flit of first SGL entry in descriptor */
};
struct rx_sw_desc { /* SW state per Rx descriptor */
@@ -109,13 +113,6 @@ struct rsp_desc { /* response queue descriptor */
u8 intr_gen;
};
-struct unmap_info { /* packet unmapping info, overlays skb->cb */
- int sflit; /* start flit of first SGL entry in Tx descriptor */
- u16 fragidx; /* first page fragment in current Tx descriptor */
- u16 addr_idx; /* buffer index of first SGL entry in descriptor */
- u32 len; /* mapped length of skb main body */
-};
-
/*
* Holds unmapping information for Tx packets that need deferred unmapping.
* This structure lives at skb->head and must be allocated by callers.
@@ -177,6 +174,7 @@ static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
static inline void refill_rspq(struct adapter *adapter,
const struct sge_rspq *q, unsigned int credits)
{
+ rmb();
t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
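
[Editor's note: the rmb() added here makes the CPU finish reading the response-queue entries before the credit-return write tells the hardware it may overwrite them. Below is a minimal userspace sketch of the same consumer-side ordering, using a C11 release store in place of the kernel's rmb(); the ring layout and names are illustrative, not the driver's.]

	#include <stdatomic.h>
	#include <stdint.h>

	struct resp_ring {                     /* hypothetical response ring */
		volatile const uint32_t *desc; /* entries the device writes  */
		_Atomic uint32_t *credit;      /* credits returned to device */
		unsigned int size, cidx;
	};

	static uint32_t consume_one(struct resp_ring *r)
	{
		uint32_t v = r->desc[r->cidx];      /* read the entry */
		r->cidx = (r->cidx + 1) % r->size;

		/*
		 * The release store keeps the load above from sinking past
		 * the credit return -- the role rmb() plays before the
		 * A_SG_RSPQ_CREDIT_RETURN write in refill_rspq().
		 */
		atomic_fetch_add_explicit(r->credit, 1, memory_order_release);
		return v;
	}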
@@ -209,32 +207,36 @@ static inline int need_skb_unmap(void)
*
* Unmap the main body of an sk_buff and its page fragments, if any.
* Because of the fairly complicated structure of our SGLs and the desire
- * to conserve space for metadata, we keep the information necessary to
- * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
- * in the Tx descriptors (the physical addresses of the various data
- * buffers). The send functions initialize the state in skb->cb so we
- * can unmap the buffers held in the first Tx descriptor here, and we
- * have enough information at this point to update the state for the next
- * Tx descriptor.
+ * to conserve space for metadata, the information necessary to unmap an
+ * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ * descriptors (the physical addresses of the various data buffers), and
+ * the SW descriptor state (assorted indices). The send functions
+ * initialize the indices for the first packet descriptor so we can unmap
+ * the buffers held in the first Tx descriptor here, and we have enough
+ * information at this point to set the state for the next Tx descriptor.
+ *
+ * Note that it is possible to clean up the first descriptor of a packet
+ * before the send routines have written the next descriptors, but this
+ * race does not cause any problem. We just end up writing the unmapping
+ * info for the 2nd descriptor first.
*/
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
unsigned int cidx, struct pci_dev *pdev)
{
const struct sg_ent *sgp;
- struct unmap_info *ui = (struct unmap_info *)skb->cb;
- int nfrags, frag_idx, curflit, j = ui->addr_idx;
+ struct tx_sw_desc *d = &q->sdesc[cidx];
+ int nfrags, frag_idx, curflit, j = d->addr_idx;
- sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
+ sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+ frag_idx = d->fragidx;
- if (ui->len) {
- pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
- PCI_DMA_TODEVICE);
- ui->len = 0; /* so we know for next descriptor for this skb */
+ if (frag_idx == 0 && skb_headlen(skb)) {
+ pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
j = 1;
}
- frag_idx = ui->fragidx;
- curflit = ui->sflit + 1 + j;
+ curflit = d->sflit + 1 + j;
nfrags = skb_shinfo(skb)->nr_frags;
while (frag_idx < nfrags && curflit < WR_FLITS) {
@@ -250,10 +252,11 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
frag_idx++;
}
- if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
- ui->fragidx = frag_idx;
- ui->addr_idx = j;
- ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
+ if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
+ d = cidx + 1 == q->size ? q->sdesc : d + 1;
+ d->fragidx = frag_idx;
+ d->addr_idx = j;
+ d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
}
}
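
[Editor's note: as the comment above describes, when an SGL spills past WR_FLITS the cleanup of one descriptor seeds the indices in the next software descriptor, wrapping at the end of the ring. Below is a self-contained miniature of that hand-off; the structure and names are simplified stand-ins for tx_sw_desc, not the driver's code.]

	struct sw_desc {         /* stand-in for tx_sw_desc's unmap fields  */
		int fragidx;     /* first page fragment in this descriptor  */
		int addr_idx;    /* SGL address slot to resume from         */
		int sflit;       /* start flit; may be -1 when an SGL entry */
		                 /* straddles the descriptor boundary       */
	};

	struct ring {
		struct sw_desc *sdesc;
		unsigned int size;
	};

	/* Record where the SGL resumes in the *next* descriptor's state,
	 * wrapping to slot 0 at the end of the ring -- the same advance
	 * unmap_skb() performs with "cidx + 1 == q->size ? q->sdesc : d + 1".
	 */
	static void chain_to_next(struct ring *q, unsigned int cidx,
				  int fragidx, int addr_idx, int sflit)
	{
		struct sw_desc *n = (cidx + 1 == q->size) ? &q->sdesc[0]
							  : &q->sdesc[cidx + 1];
		n->fragidx  = fragidx;
		n->addr_idx = addr_idx;
		n->sflit    = sflit;
	}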
@@ -281,7 +284,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
- if (d->skb->priority == cidx)
+ if (d->eop)
kfree_skb(d->skb);
}
++d;
@@ -456,7 +459,7 @@ nomem: q->alloc_failed++;
}
q->credits++;
}
-
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
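
[Editor's note: the wmb() added before the doorbell is the producer-side mirror of the rmb() above: the descriptor and buffer-state writes must be visible before the device is told to fetch them. The same ordering guards the A_SG_KDOORBELL writes added to restart_ctrlq() and restart_offloadq() below. A small illustrative userspace sketch, with made-up names:]

	#include <stdatomic.h>
	#include <stdint.h>

	struct tx_ring {                    /* hypothetical transmit ring   */
		volatile uint64_t *desc;    /* descriptors the device reads */
		_Atomic uint32_t *doorbell; /* "go fetch" register          */
		unsigned int size, pidx;
	};

	static void post_one(struct tx_ring *r, uint64_t d)
	{
		r->desc[r->pidx] = d;               /* publish the descriptor */
		r->pidx = (r->pidx + 1) % r->size;

		/*
		 * Release semantics keep the descriptor store above from
		 * sinking past the doorbell -- what wmb() does before the
		 * t3_write_reg(adap, A_SG_KDOORBELL, ...) calls.
		 */
		atomic_store_explicit(r->doorbell, r->pidx,
				      memory_order_release);
	}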
@@ -912,15 +915,13 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
sd->skb = skb;
if (need_skb_unmap()) {
- struct unmap_info *ui = (struct unmap_info *)skb->cb;
-
- ui->fragidx = 0;
- ui->addr_idx = 0;
- ui->sflit = flits;
+ sd->fragidx = 0;
+ sd->addr_idx = 0;
+ sd->sflit = flits;
}
if (likely(ndesc == 1)) {
- skb->priority = pidx;
+ sd->eop = 1;
wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
wmb();
@@ -948,6 +949,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
fp += avail;
d++;
+ sd->eop = 0;
sd++;
if (++pidx == q->size) {
pidx = 0;
@@ -966,7 +968,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
wr_gen2(d, gen);
flits = 1;
}
- skb->priority = pidx;
+ sd->eop = 1;
wrp->wr_hi |= htonl(F_WR_EOP);
wmb();
wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
@@ -1051,8 +1053,6 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
- if (need_skb_unmap())
- ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1354,6 +1354,7 @@ static void restart_ctrlq(unsigned long data)
}
spin_unlock(&q->lock);
+ wmb();
t3_write_reg(qs->adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
@@ -1363,7 +1364,12 @@ static void restart_ctrlq(unsigned long data)
*/
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
- return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ int ret;
+ local_bh_disable();
+ ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ local_bh_enable();
+
+ return ret;
}
/**
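
[Editor's note: t3_mgmt_tx() now brackets ctrl_xmit() with local_bh_disable()/local_bh_enable(): the control queue's lock is also taken from tasklet (softirq) context by restart_ctrlq(), so a process-context caller must keep bottom halves off this CPU, or the tasklet could interrupt the lock holder and spin on the lock forever. The general shape, as a hedged kernel-style sketch -- demo_lock and the function are illustrative, not the driver's:]

	#include <linux/spinlock.h>
	#include <linux/bottom_half.h>

	static DEFINE_SPINLOCK(demo_lock);  /* also taken by a tasklet */

	static void process_ctx_send(void)
	{
		local_bh_disable();     /* keep the tasklet off this CPU  */
		spin_lock(&demo_lock);  /* safe: softirqs cannot run here */
		/* ... queue the work request ... */
		spin_unlock(&demo_lock);
		local_bh_enable();
	}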
@@ -1380,13 +1386,14 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
const dma_addr_t *p;
const struct skb_shared_info *si;
const struct deferred_unmap_info *dui;
- const struct unmap_info *ui = (struct unmap_info *)skb->cb;
dui = (struct deferred_unmap_info *)skb->head;
p = dui->addr;
- if (ui->len)
- pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+ if (skb->tail - skb->transport_header)
+ pci_unmap_single(dui->pdev, *p++,
+ skb->tail - skb->transport_header,
+ PCI_DMA_TODEVICE);
si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++)
@@ -1451,8 +1458,6 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
- ((struct unmap_info *)skb->cb)->len = (skb->tail -
- skb->transport_header);
}
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
@@ -1574,6 +1579,7 @@ static void restart_offloadq(unsigned long data)
set_bit(TXQ_RUNNING, &q->flags);
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
@@ -1739,7 +1745,6 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
struct sk_buff *skb, struct sk_buff *rx_gather[],
unsigned int gather_idx)
{
- rq->offload_pkts++;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -1809,7 +1814,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
skb->dev->last_rx = jiffies;
pi = netdev_priv(skb->dev);
- if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
+ if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
!p->fragment) {
rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
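
[Editor's note: the htons(0xffff) change is a sparse endianness-annotation fix rather than a behavior change, since 0xffff reads the same in either byte order. p->csum is a big-endian field holding the hardware's ones-complement sum over the packet, and a sum that folds to 0xffff means the checksum verified. A minimal fold per RFC 1071, as an illustrative userspace sketch:]

	#include <stdint.h>
	#include <stddef.h>

	/* Ones-complement sum over a buffer (RFC 1071).  A packet whose
	 * checksum field is correct folds to 0xffff -- the value rx_eth()
	 * tests p->csum against.
	 */
	static uint16_t ones_complement_fold(const uint8_t *buf, size_t len)
	{
		uint32_t sum = 0;

		while (len > 1) {
			sum += (uint32_t)buf[0] << 8 | buf[1];
			buf += 2;
			len -= 2;
		}
		if (len)                      /* odd trailing byte */
			sum += (uint32_t)buf[0] << 8;
		while (sum >> 16)             /* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}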
@@ -1956,7 +1961,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
int eth, ethpad = 2;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
- u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+ __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
eth = r->rss_hdr.opcode == CPL_RX_PKT;
@@ -2033,6 +2038,7 @@ no_mem:
if (eth)
rx_eth(adap, q, skb, ethpad);
else {
+ q->offload_pkts++;
/* Preserve the RSS info in csum & priority */
skb->csum = rss_hi;
skb->priority = rss_lo;
@@ -2442,6 +2448,15 @@ irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
return t3_intr;
}
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+ F_RSPQDISABLED)
+
/**
* t3_sge_err_intr_handler - SGE async event interrupt handler
* @adapter: the adapter
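
[Editor's note: the new masks group the raw A_SG_INT_CAUSE bits into parity, framing, and fatal classes so the handler below can report each class once and escalate with a single test. An illustrative miniature of that classify-then-escalate shape; the bit values are made up:]

	#include <stdint.h>
	#include <stdio.h>

	#define ERR_PARITY   0x00000007u   /* made-up bit groups */
	#define ERR_FRAMING  0x00000018u
	#define ERR_OVERFLOW 0x00000020u
	#define ERR_FATAL    (ERR_PARITY | ERR_FRAMING | ERR_OVERFLOW)

	static int handle_status(uint32_t status)
	{
		if (status & ERR_PARITY)
			fprintf(stderr, "parity error (0x%x)\n",
				status & ERR_PARITY);
		if (status & ERR_FRAMING)
			fprintf(stderr, "framing error (0x%x)\n",
				status & ERR_FRAMING);

		/* report every class seen, escalate only the fatal ones */
		return (status & ERR_FATAL) != 0;
	}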
@@ -2452,6 +2467,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
{
unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+ if (status & SGE_PARERR)
+ CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+ status & SGE_PARERR);
+ if (status & SGE_FRAMINGERR)
+ CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+ status & SGE_FRAMINGERR);
+
if (status & F_RSPQCREDITOVERFOW)
CH_ALERT(adapter, "SGE response queue credit overflow\n");
@@ -2468,7 +2490,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
status & F_HIPIODRBDROPERR ? "high" : "lo");
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
- if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
+ if (status & SGE_FATALERR)
t3_fatal_err(adapter);
}
@@ -2780,7 +2802,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
- F_CQCRDTCTRL |
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
@@ -2789,7 +2811,6 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
if (adap->params.rev > 0) {
if (!(adap->flags & (USING_MSIX | USING_MSI)))
ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
- ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
}
t3_write_reg(adap, A_SG_CONTROL, ctrl);
t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
@@ -2797,7 +2818,8 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
V_TIMEOUT(200 * core_ticks_per_usec(adap)));
- t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+ adap->params.rev < T3_REV_C ? 1000 : 500);
t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);