author		Jose Abreu <Jose.Abreu@synopsys.com>	2019-06-28 09:29:18 +0200
committer	David S. Miller <davem@davemloft.net>	2019-06-28 09:23:39 -0700
commit		a993db88d17d20ccf77f7e609935f28bb9bca1c2 (patch)
tree		7efb27e0dc82922783d3a2f087f978abc809c2ff /drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
parent		net: stmmac: Do not disable interrupts when cleaning TX (diff)
net: stmmac: Enable support for > 32 Bits addressing in XGMAC
Currently, stmmac only supports 32-bit addressing for SKBs. Enable support for up to 48-bit addressing in the XGMAC core. This avoids the use of bounce buffers and increases performance.

Changes from v1:
- Fall back to 32 bits on failure (Andrew)

Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Cc: Joao Pinto <jpinto@synopsys.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Cc: Alexandre Torgue <alexandre.torgue@st.com>
Cc: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
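The v1 -> v2 change noted above ("fall back to 32 bits on failure") boils down to the probe-time pattern sketched below. This is a minimal illustration rather than the driver code itself: set_dma_width() is a hypothetical helper, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are the standard kernel DMA APIs the patch relies on.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Sketch: try the HW-reported addressing width first (addr64, e.g. 48 on
 * XGMAC cores with wide addressing), then drop back to a 32-bit mask if
 * the platform rejects it.  With a 32-bit mask the DMA API may go back to
 * bounce-buffering mappings above 4 GiB.
 */
static int set_dma_width(struct device *dev, u32 *addr64)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(*addr64));
	if (!ret)
		return 0;		/* wide mask accepted */

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;		/* even 32 bits failed: abort probe */

	*addr64 = 32;			/* remember the narrowed width */
	return 0;
}

Once a mask wider than 32 bits is in place, a descriptor address can no longer be written as a single des0 word, which is why the TSO hunks below branch on priv->dma_cap.addr64 <= 32 and use stmmac_set_desc_addr() for the wide case.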
Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 55
1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e8f3e76889c8..620dd387e3b1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2772,7 +2772,7 @@ static int stmmac_release(struct net_device *dev)
* This function fills descriptor and request new descriptors according to
* buffer length to fill
*/
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
@@ -2783,11 +2783,18 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
tmp_len = total_len;
while (tmp_len > 0) {
+ dma_addr_t curr_addr;
+
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
desc = tx_q->dma_tx + tx_q->cur_tx;
- desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
+ curr_addr = des + (total_len - tmp_len);
+ if (priv->dma_cap.addr64 <= 32)
+ desc->des0 = cpu_to_le32(curr_addr);
+ else
+ stmmac_set_desc_addr(priv, desc, curr_addr);
+
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -2833,11 +2840,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry, des;
+ unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
+ dma_addr_t des;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2894,14 +2902,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- first->des0 = cpu_to_le32(des);
+ if (priv->dma_cap.addr64 <= 32) {
+ first->des0 = cpu_to_le32(des);
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+ }
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -3031,12 +3044,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
- int entry;
- unsigned int first_entry;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
unsigned int enh_desc;
- unsigned int des;
+ dma_addr_t des;
+ int entry;
tx_q = &priv->tx_queue[queue];
@@ -4316,6 +4329,24 @@ int stmmac_dvr_probe(struct device *device,
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
+
+ if (priv->dma_cap.addr64) {
+ ret = dma_set_mask_and_coherent(device,
+ DMA_BIT_MASK(priv->dma_cap.addr64));
+ if (!ret) {
+ dev_info(priv->device, "Using %d bits DMA width\n",
+ priv->dma_cap.addr64);
+ } else {
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(priv->device, "Failed to set DMA Mask\n");
+ goto error_hw_init;
+ }
+
+ priv->dma_cap.addr64 = 32;
+ }
+ }
+
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
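For the wide-address path the hunks above defer to stmmac_set_desc_addr() rather than writing des0 directly. On XGMAC that callback splits the dma_addr_t across the two 32-bit descriptor words, roughly as in the sketch below; the mirrored struct and function name are illustrative stand-ins for the driver's struct dma_desc and per-core descriptor ops, not the in-tree code.

#include <linux/types.h>
#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */
#include <linux/dma-mapping.h>	/* dma_addr_t */
#include <asm/byteorder.h>	/* cpu_to_le32() */

/* Stand-in for the driver's struct dma_desc (descs.h): four LE32 words. */
struct dma_desc_sketch {
	__le32 des0;
	__le32 des1;
	__le32 des2;
	__le32 des3;
};

/* Program a buffer address wider than 32 bits: low half into des0,
 * high half (bits 47:32 on a 48-bit capable core) into des1.
 */
static void xgmac_set_addr_sketch(struct dma_desc_sketch *p, dma_addr_t addr)
{
	p->des0 = cpu_to_le32(lower_32_bits(addr));
	p->des1 = cpu_to_le32(upper_32_bits(addr));
}

Because des1 carries the upper address bits in this mode, it can no longer double as the buffer-2 pointer for the start of the TSO payload, which is presumably why the else branch in stmmac_tso_xmit() above sets tmp_pay_len = pay_len and pushes the whole payload through stmmac_tso_allocator() instead.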