path: root/drivers/net/ethernet/sfc/tx.c
author	Jon Cooper <jcooper@solarflare.com>	2014-06-11 14:33:08 +0100
committer	David S. Miller <davem@davemloft.net>	2014-06-11 15:36:21 -0700
commit	daf37b556e437ec1ea1a597dcfeff338068380e1
tree	968e108487ae6ac6fc76c3df46683f83a2a468cb	/drivers/net/ethernet/sfc/tx.c
parent	net: fix UDP tunnel GSO of frag_list GRO packets
sfc: PIO: Restrict to 64-bit arch and use 64-bit writes.
Fixes: ee45fd92c739 ("sfc: Use TX PIO for sufficiently small packets")

The Linux net driver uses memcpy_toio() to copy into the PIO buffers. Even on a 64-bit machine this results in 32-bit accesses to a write-combined memory region, and hardware limitations mean that only naturally aligned 64-bit accesses are safe in all cases. Because the region is write-combined, two 32-bit accesses may be coalesced into a single 64-bit access that is not 64-bit aligned.

The solution is to open-code the memory copy routines using pointers and to enable PIO only on x86_64 machines. The change has not been tested on platforms other than x86_64, because it disables the PIO feature on those platforms; it has been compile-tested on x86 to ensure that configuration still builds. The WARN_ON_ONCE() code present in the previous version of this patch has been moved into the internal sfc debug driver, as the assertion was unnecessary in the upstream kernel code.

This bug fix applies to the v3.13 and v3.14 stable branches.

Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
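For illustration, a minimal sketch of the approach described above (the helper name pio_copy64 and the CONFIG_X86_64 guard are illustrative; the actual arch gating macro is not visible in this excerpt). Copying through writeq() in 8-byte steps keeps every access to the write-combined region naturally 64-bit aligned, whereas memcpy_toio() may issue 32-bit writes that the write-combining buffer can merge into a misaligned 64-bit access.

#ifdef CONFIG_X86_64	/* illustrative guard: PIO restricted to 64-bit x86 */
/* Hypothetical sketch mirroring the efx_memcpy_64() helper added by the
 * patch below: len is assumed to be a multiple of 8, as the callers
 * guarantee by padding to L1_CACHE_BYTES.
 */
static void pio_copy64(void __iomem *dest, const void *src, size_t len)
{
	const u64 *s = src;
	u64 __iomem *d = dest;
	size_t i;

	for (i = 0; i < len / 8; i++)
		writeq(s[i], &d[i]);
}
#endif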
Diffstat (limited to 'drivers/net/ethernet/sfc/tx.c')
-rw-r--r--	drivers/net/ethernet/sfc/tx.c	22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa9475300411..ede8dcca0ff3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
u8 buf[L1_CACHE_BYTES];
};
+/* Copy in explicit 64-bit writes. */
+static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
+{
+ u64 *src64 = src;
+ u64 __iomem *dest64 = dest;
+ size_t l64 = len / 8;
+ size_t i;
+
+ for (i = 0; i < l64; i++)
+ writeq(src64[i], &dest64[i]);
+}
+
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
* Advances piobuf pointer. Leaves additional data in the copy buffer.
*/
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
{
int block_len = len & ~(sizeof(copy_buf->buf) - 1);
- memcpy_toio(*piobuf, data, block_len);
+ efx_memcpy_64(*piobuf, data, block_len);
*piobuf += block_len;
len -= block_len;
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
if (copy_buf->used < sizeof(copy_buf->buf))
return;
- memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
*piobuf += sizeof(copy_buf->buf);
data += copy_to_buf;
len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
{
/* if there's anything in it, write the whole buffer, including junk */
if (copy_buf->used)
- memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
}
/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
*/
BUILD_BUG_ON(L1_CACHE_BYTES >
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- memcpy_toio(tx_queue->piobuf, skb->data,
- ALIGN(skb->len, L1_CACHE_BYTES));
+ efx_memcpy_64(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
}
EFX_POPULATE_QWORD_5(buffer->option,
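As noted in the commit message, an alignment WARN_ON_ONCE() present in an earlier version of this patch was moved into the internal sfc debug driver. A hypothetical form of that check, purely for illustration (the wrapper name and exact conditions are assumptions, not part of this patch):

/* Hypothetical debug wrapper, not part of this patch: asserts the
 * preconditions efx_memcpy_64() relies on (length a multiple of 8 and a
 * 64-bit-aligned destination) before delegating to it.
 */
static void efx_memcpy_64_checked(void __iomem *dest, void *src, size_t len)
{
	WARN_ON_ONCE((len & 7) || ((unsigned long)dest & 7));
	efx_memcpy_64(dest, src, len);
}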