Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c   100
1 file changed, 60 insertions, 40 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index edc9a6ac5169..a761001308dc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2468,6 +2468,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 68 - 9216 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
+
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -3943,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
if (!buffer_info->page)
continue;
- dma_unmap_page(rx_ring->dev,
- buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(buffer_info->page);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ buffer_info->dma,
+ buffer_info->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_drain(buffer_info->page, 0,
+ buffer_info->pagecnt_bias);
buffer_info->page = NULL;
}
@@ -4931,11 +4947,15 @@ static int igb_tso(struct igb_ring *tx_ring,
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
/* IP header will have to cancel out any data that
* is not a part of the outer IP header
*/
- ip.v4->check = csum_fold(csum_add(lco_csum(skb),
- csum_unfold(l4.tcp->check)));
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
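
Annotation: csum_partial() accumulates a 32-bit one's-complement sum over a byte range and csum_fold() collapses it to the final inverted 16-bit checksum, so the new code seeds ip.v4->check with a value that cancels out the bytes between the end of the outer IPv4 header (trans_start) and the point where checksumming starts (csum_start). A small userspace sketch of that arithmetic, using simplified stand-ins rather than the kernel's optimized csum helpers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified one's-complement sum over a buffer, like csum_partial(). */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold the 32-bit accumulator to 16 bits and invert, like csum_fold(). */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* stand-in for the bytes between trans_start and csum_start */
	uint8_t between[8] = { 0x45, 0x00, 0x12, 0x34, 0xab, 0xcd, 0x00, 0x01 };
	uint16_t check = fold16(sum16(between, sizeof(between), 0));

	/* Including 'check' in a sum that also covers these bytes gives the
	 * all-ones result (0xffff), i.e. the bytes cancel out of any larger
	 * checksum that spans both the data and the check value.
	 */
	printf("check = 0x%04x, verify = 0x%04x\n",
	       check, (uint16_t)~fold16(sum16(between, sizeof(between), check)));
	return 0;
}
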
@@ -5408,17 +5428,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&pdev->dev, "Invalid MTU setting\n");
- return -EINVAL;
- }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9238
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
- return -EINVAL;
- }
-
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
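
Annotation: max_frame above is the MTU plus the layer 2 overhead the hardware must accommodate: ETH_HLEN (14), ETH_FCS_LEN (4) and VLAN_HLEN (4), 22 bytes in total. That is why the removed check against MAX_STD_JUMBO_FRAME_SIZE (9238) reported "MTU > 9216 not supported". A quick standalone check of that arithmetic:

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame check sequence (CRC) */
#define VLAN_HLEN	4	/* 802.1Q tag */

static int max_frame(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

int main(void)
{
	printf("mtu 1500 -> max_frame %d\n", max_frame(1500));	/* 1522 */
	printf("mtu 9216 -> max_frame %d\n", max_frame(9216));	/* 9238, the old MAX_STD_JUMBO_FRAME_SIZE */
	return 0;
}
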
@@ -6815,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
/* transfer page from old buffer to new buffer */
*new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
- old_buff->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
}
static inline bool igb_page_is_reserved(struct page *page)
@@ -6832,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
struct page *page,
unsigned int truesize)
{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
return false;
/* flip page offset to other buffer */
@@ -6851,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
return false;
#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- page_ref_inc(page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
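
Annotation: the pagecnt_bias scheme replaces one atomic page_ref_inc() per reuse with batched accounting. The buffer starts with bias 1 (see igb_alloc_mapped_page() below), each received frame hands one of the driver's references to the stack (the post-decrement above), and only when the bias is about to run out is the page refcount topped up by USHRT_MAX in a single page_ref_add(). Reuse is allowed only while page_ref_count() equals the bias, i.e. while the driver holds every outstanding reference. A userspace model of that bookkeeping, with hypothetical names and plain integers standing in for the real page refcount:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* page_refcount models page_ref_count(page); pagecnt_bias models the
 * references the driver still owns for this receive buffer.
 */
struct rx_page_model {
	unsigned int page_refcount;
	unsigned int pagecnt_bias;
};

/* Mirrors the PAGE_SIZE < 8192 path of igb_can_reuse_rx_page() above. */
static bool can_reuse(struct rx_page_model *p)
{
	unsigned int pagecnt_bias = p->pagecnt_bias--;

	/* reuse only if the driver holds every remaining reference */
	if (p->page_refcount != pagecnt_bias)
		return false;

	/* pool nearly drained: restock the references in one batched add */
	if (pagecnt_bias == 1) {
		p->page_refcount += USHRT_MAX;	/* models page_ref_add(page, USHRT_MAX) */
		p->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

int main(void)
{
	/* state right after igb_alloc_mapped_page(): refcount 1, bias 1 */
	struct rx_page_model p = { .page_refcount = 1, .pagecnt_bias = 1 };
	int reuses = 0;

	for (int i = 0; i < 100000; i++) {
		/* one driver reference is handed over with the received frame ... */
		if (can_reuse(&p))
			reuses++;
		/* ... and comes back when the stack frees the skb (put_page) */
		p.page_refcount--;
	}
	printf("reuses=%d refcount=%u bias=%u\n", reuses, p.page_refcount, p.pagecnt_bias);

	/* teardown: drop the outstanding bias, as __page_frag_drain() does */
	p.page_refcount -= p.pagecnt_bias;
	printf("after drain: refcount=%u (0 means the page would be freed)\n",
	       p.page_refcount);
	return 0;
}
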
@@ -6906,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- __free_page(page);
return false;
}
@@ -6941,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -6965,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
@@ -7237,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -7252,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
return true;
}
@@ -7278,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
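
Annotation: taken together, these hunks give the driver explicit control of cache maintenance. Pages are mapped once with DMA_ATTR_SKIP_CPU_SYNC so map and unmap no longer imply a CPU sync, the buffer is synced for the device just before it is posted to hardware and synced for the CPU just before the driver reads it, and the final unmap skips the sync because the affected region was already handled. A condensed kernel-style sketch of that ordering, assuming a device pointer, an already allocated page, and a RX_BUFSZ_MODEL stand-in for the driver's IGB_RX_BUFSZ; it shows the call sequence only and is not a drop-in for the driver code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* stand-in for the driver's IGB_RX_BUFSZ receive buffer size */
#define RX_BUFSZ_MODEL	2048

static int rx_buffer_lifecycle(struct device *dev, struct page *page)
{
	dma_addr_t dma;

	/* 1. map once; DMA_ATTR_SKIP_CPU_SYNC means map/unmap do no syncing */
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* 2. before posting the buffer to hardware: sync it for the device */
	dma_sync_single_range_for_device(dev, dma, 0, RX_BUFSZ_MODEL,
					 DMA_FROM_DEVICE);

	/* ... hardware DMAs a received frame into the buffer ... */

	/* 3. before the CPU reads the frame: invalidate stale cache lines */
	dma_sync_single_range_for_cpu(dev, dma, 0, RX_BUFSZ_MODEL,
				      DMA_FROM_DEVICE);

	/* 4. tear down without another implicit sync; step 3 covered it */
	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}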