Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c               5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h               6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          63
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h          15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c   6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c     22
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h      6
7 files changed, 93 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+ /* This (reset &) enable is not present in the specs or reference driver, but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
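For context, a minimal sketch of the probe-time check this change relies on; bcma_core_is_enabled() and bcma_core_enable() are the standard helpers from drivers/bcma, and the unconditional call in the patch is a simplification of this pattern:

	/* Make sure the Ethernet core is out of reset with clocks on
	 * before any MMIO access; the bootloader may have skipped it. */
	if (!bcma_core_is_enabled(core))
		bcma_core_enable(core, 0);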
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
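To show the corrected macro in use, a hedged sketch of forcing the MAC into software reset; bgmac_read()/bgmac_write() are the driver's existing register accessors and the delay is illustrative:

	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);

	/* Core rev >= 4 keeps the reset bit at 0x2000, older revs at 0x800 */
	bgmac_write(bgmac, BGMAC_CMDCFG,
		    cmdcfg | BGMAC_CMDCFG_SR(bgmac->core->id.rev));
	udelay(2);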
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aabbd51db981..72eb29ed0359 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ }
- mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
rx_agg_buf->page = page;
+ rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
rxbd->rx_bd_opaque = sw_prod;
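The new branch splits one kernel page into BNXT_RX_PAGE_SIZE chunks when PAGE_SIZE is larger (e.g. 64K), taking an extra page reference for every chunk but the last so each RX buffer can be released independently. The same technique in isolation, as a sketch with hypothetical names:

	/* Hand out chunk-sized slices of a cached page; each slice holds
	 * its own reference, so callers may put_page() per slice. */
	static void *chunk_alloc(struct page **cache, unsigned int *off,
				 unsigned int chunk, gfp_t gfp)
	{
		struct page *page = *cache;
		unsigned int offset;

		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return NULL;
			*cache = page;
			*off = 0;
		}
		offset = *off;
		*off += chunk;
		if (*off == PAGE_SIZE)
			*cache = NULL;		/* last slice keeps the base ref */
		else
			get_page(page);		/* extra ref for this slice */
		return page_address(page) + offset;
	}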
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
prod_rx_buf->page = page;
+ prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ skb_fill_page_desc(skb, i, cons_rx_buf->page,
+ cons_rx_buf->offset, frag_len);
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
return NULL;
}
- dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE);
skb->data_len += frag_len;
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
dma_unmap_page(&pdev->dev,
dma_unmap_addr(rx_agg_buf, mapping),
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
__free_page(page);
}
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
+ }
}
}
@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->rx_agg_nr_pages = 0;
if (bp->flags & BNXT_FLAG_TPA)
- agg_factor = 4;
+ agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
if (rx_space > PAGE_SIZE) {
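Worked example of the new clamp: with 4K pages, 65536 / BNXT_RX_PAGE_SIZE = 16, so agg_factor remains 4; with the 32K RX pages used on 64K-page kernels, 65536 / 32768 = 2, so agg_factor drops to 2 and the aggregation estimate never assumes more than 64K of page data per packet.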
@@ -2653,7 +2678,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
- for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
+ for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
/* The number of segs is in log2 units, and the first packet is not
* included in these units.
*/
- if (mss <= PAGE_SIZE) {
- n = PAGE_SIZE / mss;
+ if (mss <= BNXT_RX_PAGE_SIZE) {
+ n = BNXT_RX_PAGE_SIZE / mss;
nsegs = (MAX_SKB_FRAGS - 1) * n;
} else {
- n = mss / PAGE_SIZE;
- if (mss & (PAGE_SIZE - 1))
+ n = mss / BNXT_RX_PAGE_SIZE;
+ if (mss & (BNXT_RX_PAGE_SIZE - 1))
n++;
nsegs = (MAX_SKB_FRAGS - n) / n;
}
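Worked example of the segment math: for mss = 1448 on 32K RX pages, mss <= BNXT_RX_PAGE_SIZE, so n = 32768 / 1448 = 22 and, taking the common MAX_SKB_FRAGS value of 17, nsegs = (17 - 1) * 22 = 352. For a hypothetical mss of 40000, n = 40000 / 32768 rounds up to 2 and nsegs = (17 - 2) / 2 = 7.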
@@ -3391,11 +3416,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
INVALID_STATS_CTX_ID);
if (rc)
goto err_out;
- cpr->cp_doorbell = bp->bar1 + i * 0x80;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
}
@@ -3830,6 +3855,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input req = {0};
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3855,6 +3881,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ if (resp->hwrm_intf_maj >= 1)
+ bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4305,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_MSIX_CAP)
rc = bnxt_setup_msix(bp);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
rc = bnxt_setup_inta(bp);
}
@@ -4555,7 +4584,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
- req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
req->enables |=
cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
} else {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec04c47172b7..8b823ff558ff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
#define BNXT_MIN_PKT_SIZE 45
#define BNXT_NUM_TESTS(bp) 0
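The clamp matters on 64K-page kernels (PAGE_SHIFT = 16, as on some arm64 and powerpc configurations): BNXT_RX_PAGE_SHIFT becomes 15 and BNXT_RX_PAGE_SIZE = 1 << 15 = 32768, which still fits the 16-bit RXBD length field, whereas a full 65536-byte page would not be representable.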
@@ -477,6 +486,7 @@ struct rx_tpa_end_cmp_ext {
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -505,6 +515,7 @@ struct bnxt_sw_rx_bd {
struct bnxt_sw_rx_agg_bd {
struct page *page;
+ unsigned int offset;
dma_addr_t mapping;
};
@@ -585,6 +596,9 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
@@ -953,6 +967,7 @@ struct bnxt {
dma_addr_t hw_tx_port_stats_map;
int hw_port_stats_size;
+ u16 hwrm_max_req_len;
int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ada1662b651..2e472f6dbf2d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -855,10 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
if (BNXT_VF(bp))
return;
epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
- epause->rx_pause =
- ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0);
- epause->tx_pause =
- ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
+ epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
+ epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
static int bnxt_set_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd03cb3a..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
}
}
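A self-contained sketch of the width-safe copy this hunk introduces (an illustrative helper, not the driver's own):

	/* Read a counter that may be u32 or unsigned long into a u64
	 * ethtool slot; on 64-bit kernels the two widths differ, so
	 * pick the load size from the recorded stat size. */
	static u64 stat_read(const char *p, int stat_sizeof)
	{
		if (sizeof(unsigned long) != sizeof(u32) &&
		    stat_sizeof == sizeof(unsigned long))
			return *(const unsigned long *)p;
		return *(const u32 *)p;
	}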
@@ -1171,6 +1175,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int pkts_compl = 0;
+ unsigned int bytes_compl = 0;
unsigned int c_index;
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
@@ -1193,16 +1198,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
if (tx_cb_ptr->skb) {
pkts_compl++;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dev->stats.tx_bytes +=
- dma_unmap_len(tx_cb_ptr, dma_len);
dma_unmap_page(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1222,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ dev->stats.tx_packets += pkts_compl;
+ dev->stats.tx_bytes += bytes_compl;
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1301,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
tx_cb_ptr->skb = skb;
- skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+ skb_len = skb_headlen(skb);
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
@@ -1464,6 +1469,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ /* Retain how many bytes will be sent on the wire, without TSB inserted
+ * by transmit checksum offload
+ */
+ GENET_CB(skb)->bytes_sent = skb->len;
+
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
skb = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 967367557309..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
u32 flags;
};
+struct bcmgenet_skb_cb {
+ unsigned int bytes_sent; /* bytes on the wire (no TSB) */
+};
+
+#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
+
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
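A hedged usage sketch for the new control-block helper; the BUILD_BUG_ON guard is illustrative, not part of the patch:

	/* skb->cb is 48 bytes of scratch owned by the current layer;
	 * check the private struct fits before stashing state in it. */
	BUILD_BUG_ON(sizeof(struct bcmgenet_skb_cb) >
		     sizeof(((struct sk_buff *)0)->cb));

	GENET_CB(skb)->bytes_sent = skb->len;	/* record before TSB is added */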