author		Jose Abreu <Jose.Abreu@synopsys.com>	2019-07-30 15:57:16 +0200
committer	David S. Miller <davem@davemloft.net>	2019-07-30 10:25:58 -0700
commit		3caa61c208753492fe59efb20e06f7c608eb8db2 (patch)
tree		3e9e925a0876704aded483ffb55cc7ead471130a /drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
parent		mlxsw: spectrum_ptp: fix duplicated check on orig_egr_types (diff)
net: stmmac: Sync RX Buffer upon allocation
With recent changes that introduced support for Page Pool in stmmac, Jon
reported that NFS boot was no longer working on an ARM64 based platform
that had the IP behind an IOMMU.

As the Page Pool API does not guarantee DMA syncing, because of the use
of the DMA_ATTR_SKIP_CPU_SYNC flag, we have to explicitly sync the whole
buffer upon re-allocation because we are always re-using the same pages.

In fact, the ARM64 code invalidates the DMA area in two situations [1]:
- sync_single_for_cpu(): invalidates if direction != DMA_TO_DEVICE
- sync_single_for_device(): invalidates if direction == DMA_FROM_DEVICE

So, as we must invalidate both the current RX buffer and the newly
allocated buffer, we propose this fix.

[1] arch/arm64/mm/cache.S

Reported-by: Jon Hunter <jonathanh@nvidia.com>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Fixes: 2af6106ae949 ("net: stmmac: Introducing support for Page Pool")
Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Tested-by: Ezequiel Garcia <ezequiel@collabora.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
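For context, here is a minimal sketch of the refill pattern this fix
implies. The helper refill_one_rx_buffer() and its buf_sz parameter are
hypothetical, introduced only for illustration; page_pool_dev_alloc_pages(),
page_pool_get_dma_addr() and dma_sync_single_for_device() are the real
kernel APIs the patch relies on:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <net/page_pool.h>

/* Hypothetical helper, for illustration only: take a (possibly
 * recycled) page from the pool and make the whole buffer visible to
 * the device. Because the pool maps pages with DMA_ATTR_SKIP_CPU_SYNC
 * semantics, syncing is the driver's job; on arm64 the
 * DMA_FROM_DEVICE direction invalidates the CPU cache lines covering
 * the buffer, so stale data from the page's previous use cannot leak
 * into a received frame.
 */
static struct page *refill_one_rx_buffer(struct device *dev,
					 struct page_pool *pool,
					 unsigned int buf_sz)
{
	struct page *page = page_pool_dev_alloc_pages(pool);
	dma_addr_t addr;
	int len;

	if (!page)
		return NULL;

	/* Round up to whole pages, exactly as the fix computes len */
	len = DIV_ROUND_UP(buf_sz, PAGE_SIZE) * PAGE_SIZE;

	addr = page_pool_get_dma_addr(page);
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);

	return page;
}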
Diffstat
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 98b1a5c6d537..9a4a56ad35cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3271,9 +3271,11 @@ static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-	int dirty = stmmac_rx_dirty(priv, queue);
+	int len, dirty = stmmac_rx_dirty(priv, queue);
 	unsigned int entry = rx_q->dirty_rx;
 
+	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+
 	while (dirty-- > 0) {
 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
 		struct dma_desc *p;
@@ -3291,6 +3293,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 		}
 
 		buf->addr = page_pool_get_dma_addr(buf->page);
+
+		/* Sync whole allocation to device. This will invalidate old
+		 * data.
+		 */
+		dma_sync_single_for_device(priv->device, buf->addr, len,
+					   DMA_FROM_DEVICE);
+
 		stmmac_set_desc_addr(priv, p, buf->addr);
 		stmmac_refill_desc3(priv, rx_q, p);
 
@@ -3425,8 +3434,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			skb_copy_to_linear_data(skb, page_address(buf->page),
 						frame_len);
 			skb_put(skb, frame_len);
-			dma_sync_single_for_device(priv->device, buf->addr,
-						   frame_len, DMA_FROM_DEVICE);
 
 			if (netif_msg_pktdata(priv)) {
 				netdev_dbg(priv->dev, "frame received (%dbytes)",
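As a usage note on the len computation above: rounding with
DIV_ROUND_UP makes the sync cover every page the buffer spans rather
than just frame_len bytes, which is why the per-frame sync in
stmmac_rx() can be dropped. A standalone sketch of the arithmetic,
assuming a 4 KiB page size and a typical 1536-byte stmmac buffer
(both values are illustrative):

#include <stdio.h>

/* Same rounding macro the kernel defines in <linux/kernel.h> */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int page_size = 4096;   /* assumed PAGE_SIZE */
	unsigned int dma_buf_sz = 1536;  /* assumed buffer size */

	/* Rounds the sync length up to whole pages: 1536 -> 4096 */
	unsigned int len = DIV_ROUND_UP(dma_buf_sz, page_size) * page_size;

	printf("sync length: %u bytes\n", len); /* prints 4096 */
	return 0;
}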