Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  685
1 file changed, 489 insertions(+), 196 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c5ce9848ca9..eac45d0c75e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
#include "dwmac1000.h"
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
#define TX_TIMEO 5000
@@ -278,7 +279,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
- char *phy_bus_name = priv->plat->phy_bus_name;
unsigned long flags;
bool ret = false;
@@ -289,10 +289,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
(priv->pcs == STMMAC_PCS_RTBI))
goto out;
- /* Never init EEE in case of a switch is attached */
- if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
- goto out;
-
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
@@ -726,13 +722,15 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely(priv->plat->has_gmac))
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4)))
ctrl &= ~priv->hw->link.port;
stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
- if (priv->plat->has_gmac) {
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4))) {
ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
ctrl |= priv->hw->link.speed;
@@ -772,10 +770,16 @@ static void stmmac_adjust_link(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* At this stage, it could be needed to setup the EEE or adjust some
- * MAC related HW registers.
- */
- priv->eee_enabled = stmmac_eee_init(priv);
+ if (phydev->is_pseudo_fixed_link)
+ /* Keep the PHY layer from calling the adjust_link hook when
+ * a switch is attached to the stmmac driver.
+ */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+ else
+ /* At this stage, initialize the EEE if it is supported.
+ * This path is never taken in the fixed_link case.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
}
/**
@@ -827,12 +831,8 @@ static int stmmac_init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, priv->plat->phy_node,
&stmmac_adjust_link, 0, interface);
} else {
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
@@ -870,11 +870,6 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
- /* If attached to a switch, there is no reason to poll phy handler */
- if (priv->plat->phy_bus_name)
- if (!strcmp(priv->plat->phy_bus_name, "fixed"))
- phydev->irq = PHY_IGNORE_INTERRUPT;
-
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
" Link = %d\n", dev->name, phydev->phy_id, phydev->link);
@@ -883,53 +878,22 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-/**
- * stmmac_display_ring - display ring
- * @head: pointer to the head of the ring passed.
- * @size: size of the ring.
- * @extend_desc: to verify if extended descriptors are used.
- * Description: display the control/status and buffer descriptors.
- */
-static void stmmac_display_ring(void *head, int size, int extend_desc)
-{
- int i;
- struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
- struct dma_desc *p = (struct dma_desc *)head;
-
- for (i = 0; i < size; i++) {
- u64 x;
- if (extend_desc) {
- x = *(u64 *) ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- ep->basic.des2, ep->basic.des3);
- ep++;
- } else {
- x = *(u64 *) p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
- i, (unsigned int)virt_to_phys(p),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
- p++;
- }
- pr_info("\n");
- }
-}
-
static void stmmac_display_rings(struct stmmac_priv *priv)
{
+ void *head_rx, *head_tx;
+
if (priv->extend_desc) {
- pr_info("Extended RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
- pr_info("Extended TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
+ head_rx = (void *)priv->dma_erx;
+ head_tx = (void *)priv->dma_etx;
} else {
- pr_info("RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
- pr_info("TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
+ head_rx = (void *)priv->dma_rx;
+ head_tx = (void *)priv->dma_tx;
}
+
+ /* Display Rx ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ /* Display Tx ring */
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1008,7 +972,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- p->des2 = priv->rx_skbuff_dma[i];
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ p->des0 = priv->rx_skbuff_dma[i];
+ else
+ p->des2 = priv->rx_skbuff_dma[i];
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1099,7 +1066,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
p = &((priv->dma_etx + i)->basic);
else
p = priv->dma_tx + i;
- p->des2 = 0;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ } else {
+ p->des2 = 0;
+ }
+
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff_dma[i].len = 0;
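The des0-vs-des2 split above recurs throughout the patch: DWMAC_CORE_4_00 and newer cores carry the buffer address in des0, while older cores use des2. A minimal sketch of that convention as a helper (hypothetical name, not part of the patch, reusing the driver's own types):

static inline void stmmac_set_desc_buf_addr(struct stmmac_priv *priv,
					    struct dma_desc *p, u32 addr)
{
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = addr;		/* GMAC4: buffer 1 address in des0 */
	else
		p->des2 = addr;		/* older cores: buffer address in des2 */
}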
@@ -1362,9 +1338,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].len = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
- priv->hw->mode->clean_desc3(priv, p);
+
+ if (priv->hw->mode->clean_desc3)
+ priv->hw->mode->clean_desc3(priv, p);
+
priv->tx_skbuff_dma[entry].last_segment = false;
priv->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1487,41 +1467,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
- MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- dwmac_mmc_intr_all_mask(priv->ioaddr);
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
+ else
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+
+ dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->ioaddr, mode);
+ dwmac_mmc_ctrl(priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
pr_info(" No MAC Management Counters available\n");
}
/**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
-{
- u32 hwid = priv->hw->synopsys_uid;
-
- /* Check Synopsys Id (not available on old chips) */
- if (likely(hwid)) {
- u32 uid = ((hwid & 0x0000ff00) >> 8);
- u32 synid = (hwid & 0x000000ff);
-
- pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
- uid, synid);
-
- return synid;
- }
- return 0;
-}
-
-/**
* stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
* @priv: driver private structure
* Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1558,51 +1520,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
- u32 hw_cap = 0;
+ u32 ret = 0;
if (priv->hw->dma->get_hw_feature) {
- hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-
- priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
- priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
- priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
- priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
- priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
- priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
- priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
- priv->dma_cap.pmt_remote_wake_up =
- (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
- priv->dma_cap.pmt_magic_frame =
- (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
- /* MMC */
- priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
- /* IEEE 1588-2002 */
- priv->dma_cap.time_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
- /* IEEE 1588-2008 */
- priv->dma_cap.atime_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
- /* 802.3az - Energy-Efficient Ethernet (EEE) */
- priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
- priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
- /* TX and RX csum */
- priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
- priv->dma_cap.rx_coe_type1 =
- (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
- priv->dma_cap.rx_coe_type2 =
- (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
- priv->dma_cap.rxfifo_over_2048 =
- (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
- /* TX and RX number of channels */
- priv->dma_cap.number_rx_channel =
- (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
- priv->dma_cap.number_tx_channel =
- (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
- /* Alternate (enhanced) DESC mode */
- priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
- }
-
- return hw_cap;
+ priv->hw->dma->get_hw_feature(priv->ioaddr,
+ &priv->dma_cap);
+ ret = 1;
+ }
+
+ return ret;
}
/**
@@ -1658,8 +1584,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
- (priv->plat->axi && priv->hw->dma->axi))
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->rx_tail_addr = priv->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
+ STMMAC_CHAN0);
+
+ priv->tx_tail_addr = priv->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+ }
+
+ if (priv->plat->axi && priv->hw->dma->axi)
priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
return ret;
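The GMAC4 DMA is driven by tail-pointer writes rather than the older transmit-poll demand register, so each tail is pre-computed one full ring past the descriptor base. A rough sketch of that arithmetic (illustrative only, hypothetical helper name):

static inline dma_addr_t stmmac_ring_tail(dma_addr_t base, unsigned int ring_size)
{
	/* one whole ring past the base: every descriptor up to the end is usable */
	return base + ring_size * sizeof(struct dma_desc);
}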
@@ -1739,7 +1676,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_dwmac4_set_mac(priv->ioaddr, true);
+ else
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1777,6 +1717,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->pcs && priv->hw->mac->ctrl_ane)
priv->hw->mac->ctrl_ane(priv->hw, 0);
+ /* set TX ring length */
+ if (priv->hw->dma->set_tx_ring_len)
+ priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+ (DMA_TX_SIZE - 1));
+ /* set RX ring length */
+ if (priv->hw->dma->set_rx_ring_len)
+ priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+ (DMA_RX_SIZE - 1));
+ /* Enable TSO */
+ if (priv->tso)
+ priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+
return 0;
}
@@ -1942,6 +1894,239 @@ static int stmmac_release(struct net_device *dev)
}
/**
+ * stmmac_tso_allocator - allocate TX descriptors for a TSO payload
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: true if this is the last descriptor of the frame
+ * Description:
+ * This function fills the current descriptor and takes new descriptors as
+ * needed, splitting the buffer into chunks of at most TSO_MAX_BUFF_SIZE.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+ int total_len, bool last_segment)
+{
+ struct dma_desc *desc;
+ int tmp_len;
+ u32 buff_size;
+
+ tmp_len = total_len;
+
+ while (tmp_len > 0) {
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ desc = priv->dma_tx + priv->cur_tx;
+
+ desc->des0 = des + (total_len - tmp_len);
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
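Each pass of the loop above consumes one descriptor and at most TSO_MAX_BUFF_SIZE bytes, which is why stmmac_tso_xmit checks for (payload / TSO_MAX_BUFF_SIZE + 1) free entries before starting. A minimal sketch of that bound (illustrative only, hypothetical helper name):

static inline unsigned int stmmac_tso_desc_count(unsigned int payload_len)
{
	/* one entry per full TSO_MAX_BUFF_SIZE chunk, plus one for the remainder */
	return payload_len / TSO_MAX_BUFF_SIZE + 1;
}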
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in the case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * The MSS is fixed while TSO is in use, so the TDES3 context field only
+ * needs to be programmed again when the MSS value changes.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 pay_len, mss;
+ int tmp_pay_len = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, des;
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ u8 proto_hdr_len;
+ int i;
+
+ spin_lock(&priv->tx_lock);
+
+ /* Compute header lengths */
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ /* Desc availability based on threshold should be enough safe */
+ if (unlikely(stmmac_tx_avail(priv) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ }
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != priv->mss) {
+ mss_desc = priv->dma_tx + priv->cur_tx;
+ priv->hw->desc->set_mss(mss_desc, mss);
+ priv->mss = mss;
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ first_entry = priv->cur_tx;
+
+ desc = priv->dma_tx + first_entry;
+ first = desc;
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ priv->tx_skbuff_dma[first_entry].buf = des;
+ priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ priv->tx_skbuff[first_entry] = skb;
+
+ first->des0 = des;
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = des + proto_hdr_len;
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ des = skb_frag_dma_map(priv->device, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1));
+
+ priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+ priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+ priv->tx_skbuff[priv->cur_tx] = NULL;
+ priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+ }
+
+ priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (netif_msg_hw(priv))
+ pr_debug("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ /* Manage tx mitigation */
+ priv->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else {
+ priv->tx_count_frames = 0;
+ priv->hw->desc->set_tx_ic(desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ priv->hw->desc->prepare_tso_tx_desc(first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, priv->tx_skbuff_dma[first_entry].last_segment,
+ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc)
+ priv->hw->desc->set_tx_owner(mss_desc);
+
+ /* The own bit must be the last setting done when preparing the
+ * descriptor, and a barrier is needed to make sure that
+ * everything is coherent before granting the DMA engine.
+ */
+ smp_wmb();
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ priv->cur_tx, first, nfrags);
+
+ priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+ 0);
+
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_sent_queue(dev, skb->len);
+
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ spin_unlock(&priv->tx_lock);
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
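The diagram in the comment block above names the two TDES3 fields the first descriptor must carry: the TCP header length in 32-bit words at bits [22:19] (hence the tcp_hdrlen(skb) / 4 passed to prepare_tso_tx_desc()) and the TCP payload length at bits [17:0]. A minimal packing sketch assuming exactly those bit positions (illustrative only, not the driver's descriptor API; the TSE enable bit is set separately):

static inline u32 tso_first_tdes3(u32 tcp_hdr_words, u32 tcp_payload_len)
{
	return ((tcp_hdr_words & 0xf) << 19) |	/* TCP header length, bits [22:19] */
	       (tcp_payload_len & 0x3ffff);	/* TCP payload length, bits [17:0] */
}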
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -1958,6 +2143,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry, first_entry;
struct dma_desc *desc, *first;
unsigned int enh_desc;
+ unsigned int des;
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ return stmmac_tso_xmit(skb, dev);
+ }
spin_lock(&priv->tx_lock);
@@ -1993,7 +2185,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (enh_desc)
is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
- if (unlikely(is_jumbo)) {
+ if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+ DWMAC_CORE_4_00)) {
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
@@ -2011,13 +2204,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
desc = priv->dma_tx + entry;
- desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
priv->tx_skbuff[entry] = NULL;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ desc->des0 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des0;
+ } else {
+ desc->des2 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ }
+
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
priv->tx_skbuff_dma[entry].last_segment = last_segment;
@@ -2032,16 +2233,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
+ void *tx_head;
+
pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, priv->cur_tx, priv->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_etx,
- DMA_TX_SIZE, 1);
+ tx_head = (void *)priv->dma_etx;
else
- stmmac_display_ring((void *)priv->dma_tx,
- DMA_TX_SIZE, 0);
+ tx_head = (void *)priv->dma_tx;
+
+ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
pr_debug(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
@@ -2080,12 +2283,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
bool last_segment = (nfrags == 0);
- first->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, first->des2))
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ first->des0 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des0;
+ } else {
+ first->des2 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ }
+
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -2109,7 +2319,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_sent_queue(dev, skb->len);
- priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ else
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;
@@ -2191,9 +2406,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
dev_kfree_skb(skb);
break;
}
- p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->mode->refill_desc3(priv, p);
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ p->des0 = priv->rx_skbuff_dma[entry];
+ p->des1 = 0;
+ } else {
+ p->des2 = priv->rx_skbuff_dma[entry];
+ }
+ if (priv->hw->mode->refill_desc3)
+ priv->hw->mode->refill_desc3(priv, p);
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
@@ -2201,9 +2422,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
}
-
wmb();
- priv->hw->desc->set_rx_owner(p);
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+ else
+ priv->hw->desc->set_rx_owner(p);
+
wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
@@ -2226,13 +2451,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_erx,
- DMA_RX_SIZE, 1);
+ rx_head = (void *)priv->dma_erx;
else
- stmmac_display_ring((void *)priv->dma_rx,
- DMA_RX_SIZE, 0);
+ rx_head = (void *)priv->dma_rx;
+
+ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
int status;
@@ -2282,11 +2509,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
} else {
struct sk_buff *skb;
int frame_len;
+ unsigned int des;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ des = p->des0;
+ else
+ des = p->des2;
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
- /* check if frame_len fits the preallocated memory */
+ /* If the frame length is greater than the skb buffer size
+ * (preallocated during init), the packet is
+ * ignored.
+ */
if (frame_len > priv->dma_buf_sz) {
+ pr_err("%s: len %d larger than size (%d)\n",
+ priv->dev->name, frame_len,
+ priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@@ -2299,14 +2538,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, p->des2);
+ p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
frame_len, status);
}
- if (unlikely((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(priv))) {
+ /* Zero-copy is always used, whatever the frame size,
+ * on GMAC4 because the used descriptors always need
+ * to be refilled.
+ */
+ if (unlikely(!priv->plat->has_gmac4 &&
+ ((frame_len < priv->rx_copybreak) ||
+ stmmac_rx_threshold_count(priv)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
@@ -2458,7 +2702,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->plat->enh_desc)
+ if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
max_mtu = JUMBO_LEN;
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2472,6 +2716,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
}
dev->mtu = new_mtu;
+
netdev_update_features(dev);
return 0;
@@ -2496,6 +2741,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
return features;
}
@@ -2542,7 +2795,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
}
/* To handle GMAC own interrupts */
- if (priv->plat->has_gmac) {
+ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
if (unlikely(status)) {
@@ -2551,6 +2804,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ priv->rx_tail_addr,
+ STMMAC_CHAN0);
}
}
@@ -2623,15 +2880,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des0, ep->basic.des1,
ep->basic.des2, ep->basic.des3);
ep++;
} else {
x = *(u64 *) p;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
seq_printf(seq, "\n");
@@ -2714,10 +2970,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
- (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
- (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
(priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2826,27 +3087,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries);
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
+ } else if (priv->plat->has_gmac4) {
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac = dwmac4_setup(priv->ioaddr,
+ priv->plat->multicast_filter_bins,
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
} else {
- mac = dwmac100_setup(priv->ioaddr);
+ mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
}
if (!mac)
return -ENOMEM;
priv->hw = mac;
- /* Get and dump the chip ID */
- priv->synopsys_id = stmmac_get_synopsys_id(priv);
-
/* To use the chained or ring mode */
- if (chain_mode) {
- priv->hw->mode = &chain_mode_ops;
- pr_info(" Chain mode enabled\n");
- priv->mode = STMMAC_CHAIN_MODE;
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->mode = &dwmac4_ring_mode_ops;
} else {
- priv->hw->mode = &ring_mode_ops;
- pr_info(" Ring mode enabled\n");
- priv->mode = STMMAC_RING_MODE;
+ if (chain_mode) {
+ priv->hw->mode = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->mode = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
}
/* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2868,6 +3137,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else
priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ /* In case of GMAC4 rx_coe is from HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
if (priv->dma_cap.rx_coe_type2)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
else if (priv->dma_cap.rx_coe_type1)
@@ -2876,13 +3148,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else
pr_info(" No HW DMA feature register supported");
- /* To use alternate (extended) or normal descriptor structures */
- stmmac_selec_desc_mode(priv);
+ /* To use alternate (extended), normal or GMAC4 descriptor structures */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->hw->desc = &dwmac4_desc_ops;
+ else
+ stmmac_selec_desc_mode(priv);
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
- pr_info(" RX Checksum Offload Engine supported (type %d)\n",
- priv->plat->rx_coe);
+ pr_info(" RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
}
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
@@ -2892,6 +3168,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
device_set_wakeup_capable(priv->device, 1);
}
+ if (priv->dma_cap.tsoen)
+ pr_info(" TSO supported\n");
+
return 0;
}
@@ -2995,6 +3274,12 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO;
+ priv->tso = true;
+ pr_info(" TSO feature enabled\n");
+ }
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
@@ -3070,12 +3355,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
* stmmac_dvr_remove
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
* changes the link status, releases the DMA descriptor rings.
*/
-int stmmac_dvr_remove(struct net_device *ndev)
+int stmmac_dvr_remove(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
pr_info("%s:\n\tremoving driver", __func__);
@@ -3101,13 +3387,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
* stmmac_suspend - suspend callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this is the function to suspend the device and it is called
* by the platform driver to stop the network queue, release the resources,
* program the PMT register (for WoL), clean and release driver resources.
*/
-int stmmac_suspend(struct net_device *ndev)
+int stmmac_suspend(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -3150,12 +3437,13 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_resume - resume callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
* in a usable state.
*/
-int stmmac_resume(struct net_device *ndev)
+int stmmac_resume(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -3189,6 +3477,11 @@ int stmmac_resume(struct net_device *ndev)
priv->dirty_rx = 0;
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ /* reset private mss value to force mss context settings at
+ * next tso xmit (only used for gmac4).
+ */
+ priv->mss = 0;
+
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);