Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 61
1 file changed, 52 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 11e8a866a312..a19f68f5862d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1311,10 +1311,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		cp_cons = NEXT_CMP(cp_cons);
 	}
 
-	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
 		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
-		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
-			    agg_bufs, (int)MAX_SKB_FRAGS);
+		if (agg_bufs > MAX_SKB_FRAGS)
+			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+				    agg_bufs, (int)MAX_SKB_FRAGS);
 		return NULL;
 	}
 
@@ -1573,6 +1574,45 @@ next_rx_no_prod:
 	return rc;
 }
 
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+				 u32 *raw_cons, u8 *event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 tmp_raw_cons = *raw_cons;
+	struct rx_cmp_ext *rxcmp1;
+	struct rx_cmp *rxcmp;
+	u16 cp_cons;
+	u8 cmp_type;
+
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp = (struct rx_cmp *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp1 = (struct rx_cmp_ext *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+		return -EBUSY;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		struct rx_tpa_end_cmp_ext *tpa_end1;
+
+		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+	}
+	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
 #define BNXT_GET_EVENT_PORT(data)					\
 	((data) &							\
 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1755,7 +1795,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 			if (unlikely(tx_pkts > bp->tx_wake_thresh))
 				rx_pkts = budget;
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			if (likely(budget))
+				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			else
+				rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+							   &event);
 			if (likely(rc >= 0))
 				rx_pkts += rc;
 			else if (rc == -EBUSY)	/* partial completion */
@@ -6730,12 +6774,11 @@ static void bnxt_poll_controller(struct net_device *dev)
 	struct bnxt *bp = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_irq *irq = &bp->irq_tbl[i];
+	/* Only process tx rings/combined rings in netpoll mode. */
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 
-		disable_irq(irq->vector);
-		irq->handler(irq->vector, bp->bnapi[i]);
-		enable_irq(irq->vector);
+		napi_schedule(&txr->bnapi->napi);
 	}
 }
 #endif