author     Scott Peterson <scott.d.peterson@intel.com>  2017-02-09 23:37:28 -0800
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2017-02-11 20:39:01 -0800
commit     7987dcd7b95111a5acbf5abdbf155eedacd3546b (patch)
tree       f89cb5472c01b4c3020de196674ea368f781f899 /drivers/net/ethernet/intel/i40evf
parent     i40evf: track outstanding client request (diff)
download   linux-dev-7987dcd7b95111a5acbf5abdbf155eedacd3546b.tar.xz
           linux-dev-7987dcd7b95111a5acbf5abdbf155eedacd3546b.zip
i40e/i40evf: Limit DMA sync of RX buffers to actual packet size
On packet RX, we perform a DMA sync for CPU before passing the packet up.
Here we limit that sync to the actual length of the incoming packet, rather
than always syncing the entire buffer.

Change-ID: I626aaf6c37275a8ce9e81efcaa773f327b331487
Signed-off-by: Scott Peterson <scott.d.peterson@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
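A minimal sketch, condensed from the hunks below, of what the receive path
looks like after this change: the packet length is read from the descriptor
once in the caller, and only that many bytes are synced for the CPU before
the page fragment is attached to the skb. The helper name
fetch_rx_buffer_sketch and its trimmed-down body are illustrative only; the
real i40evf_fetch_rx_buffer() also handles skb allocation, page refcounting
and buffer recycling.

    static struct sk_buff *fetch_rx_buffer_sketch(struct i40e_ring *rx_ring,
                                                  union i40e_rx_desc *rx_desc,
                                                  struct sk_buff *skb)
    {
            /* read the packet length written back by hardware once, up front */
            u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
            unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                                I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
            struct i40e_rx_buffer *rx_buffer =
                    &rx_ring->rx_bi[rx_ring->next_to_clean];

            /* sync only the bytes the NIC actually wrote, not the full
             * I40E_RXBUFFER_2048 buffer
             */
            dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
                                          rx_buffer->page_offset, size,
                                          DMA_FROM_DEVICE);

            /* i40e_add_rx_frag() now takes the precomputed size instead of
             * re-deriving it from the descriptor
             */
            if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
                    /* hand second half of page back to the ring */
                    i40e_reuse_rx_page(rx_ring, rx_buffer);
                    rx_ring->rx_stats.page_reuse_count++;
            }

            return skb;
    }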
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index d4e488267988..4264d9133366 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1003,7 +1003,7 @@ static inline bool i40e_page_is_reserved(struct page *page)
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1016,13 +1016,10 @@ static inline bool i40e_page_is_reserved(struct page *page)
**/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- union i40e_rx_desc *rx_desc,
+ unsigned int size,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
- u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
#if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048;
#else
@@ -1091,6 +1088,11 @@ static inline
struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc)
{
+ u64 local_status_error_len =
+ le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size =
+ (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
struct i40e_rx_buffer *rx_buffer;
struct sk_buff *skb;
struct page *page;
@@ -1132,11 +1134,11 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;