 drivers/net/ethernet/intel/ice/ice_txrx.c | 76 +++++++++++++++++----------
 1 file changed, 50 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index d003f4d49ae6..122a0af1ea52 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -499,18 +499,41 @@ static bool ice_page_is_reserved(struct page *page)
}
/**
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
+ *
+ * Update the offset within the page so that the Rx buffer will be ready to
+ * be reused. For systems with PAGE_SIZE < 8192 this function will flip the
+ * page offset so that the second half of the page assigned to the Rx buffer
+ * is used; otherwise the offset is moved forward by @size bytes.
+ */
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= size;
+#else
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += size;
+#endif
+}
+
+/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
- * @truesize: the offset that needs to be applied to page
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
- unsigned int truesize)
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
+#if (PAGE_SIZE >= 8192)
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
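The new ice_rx_buf_adjust_pg_offset() helper relies on a simple property: when a page holds exactly two power-of-two-sized buffers, XOR-ing the offset with the buffer size toggles between the two halves. A minimal user-space sketch of that toggling, assuming 4K pages and 2048-byte Rx buffers (illustration only, not kernel code):

#include <stdio.h>

#define MODEL_RXBUF_2048 2048	/* stands in for ICE_RXBUF_2048 */

int main(void)
{
	unsigned int page_offset = 0;
	int i;

	/* XOR with the buffer size flips between the two halves of
	 * a 4096-byte page: 0 -> 2048 -> 0 -> 2048 ...
	 */
	for (i = 0; i < 4; i++) {
		printf("Rx buffer uses page offset %u\n", page_offset);
		page_offset ^= MODEL_RXBUF_2048;
	}
	return 0;
}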
@@ -522,14 +545,8 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
/* if we are only owner of page we can reuse it */
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += truesize;
-
- if (rx_buf->page_offset > PAGE_SIZE - ICE_RXBUF_2048)
+ if (rx_buf->page_offset > last_offset)
return false;
#endif /* PAGE_SIZE < 8192 */
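The reuse test above compares the page's true reference count against the locally tracked pagecnt_bias, so the references the driver granted itself are discounted; the page can go back to the ring only when nobody else (for example the stack, via an skb fragment) still holds it. A toy user-space model of that check, using hypothetical stand-in types:

#include <stdio.h>

/* Hypothetical stand-in for the relevant ice_rx_buf fields */
struct rxbuf_model {
	unsigned int page_refcount;	/* models page_count(page) */
	unsigned int pagecnt_bias;	/* refs the driver granted itself */
};

/* Mirrors the check in ice_can_reuse_rx_page(): the page may only be
 * recycled if nobody but the driver still references it.
 */
static int can_reuse(const struct rxbuf_model *b)
{
	return (b->page_refcount - b->pagecnt_bias) <= 1;
}

int main(void)
{
	struct rxbuf_model only_ours = { .page_refcount = 2, .pagecnt_bias = 1 };
	struct rxbuf_model stack_holds = { .page_refcount = 3, .pagecnt_bias = 1 };

	printf("only driver refs:  reuse=%d\n", can_reuse(&only_ours));
	printf("stack still holds: reuse=%d\n", can_reuse(&stack_holds));
	return 0;
}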
@@ -556,10 +573,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
*/
-static bool
+static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
@@ -582,14 +598,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
if (size <= ICE_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ice_page_is_reserved(page))) {
- rx_buf->pagecnt_bias++;
- return true;
- }
-
- /* this page cannot be reused so discard it */
- return false;
+ rx_buf->pagecnt_bias++;
+ return;
}
/* we need the header to contain the greater of either ETH_HLEN or
@@ -610,8 +620,7 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(unsigned long)va & ~PAGE_MASK, size, truesize);
-
- return ice_can_reuse_rx_page(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}
/**
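As its kernel-doc describes, ice_add_rx_frag() copies small frames into the skb header area and bumps pagecnt_bias so the page stays with the driver, while larger frames are attached as a page fragment and the offset is advanced via ice_rx_buf_adjust_pg_offset(). A simplified, self-contained model of that decision; every "model_" name is a hypothetical stand-in:

#include <stdio.h>
#include <string.h>

#define MODEL_RX_HDR_SIZE 256	/* stands in for ICE_RX_HDR_SIZE */

/* Hypothetical, simplified stand-ins for the kernel structures */
struct buf_model {
	const unsigned char *va;	/* CPU address of received data */
	unsigned int page_offset;
	unsigned int pagecnt_bias;
};

struct skb_model {
	unsigned char head[MODEL_RX_HDR_SIZE];
	unsigned int len;
	unsigned int nr_frags;
};

/* Small frames are copied into the skb head and the page is kept by
 * bumping pagecnt_bias; larger ones are attached as a page fragment
 * and the offset is advanced for the next user.
 */
static void model_add_rx_frag(struct buf_model *buf, struct skb_model *skb,
			      unsigned int size, unsigned int truesize)
{
	if (size <= MODEL_RX_HDR_SIZE) {
		memcpy(skb->head + skb->len, buf->va, size);
		skb->len += size;
		buf->pagecnt_bias++;	/* buffer stays with the driver */
		return;
	}

	skb->nr_frags++;		/* page half now referenced by skb */
	buf->page_offset += truesize;	/* advance past consumed data */
}

int main(void)
{
	unsigned char data[2048] = { 0 };
	struct buf_model buf = { .va = data };
	struct skb_model skb = { .len = 0 };

	model_add_rx_frag(&buf, &skb, 64, 512);		/* copied */
	model_add_rx_frag(&buf, &skb, 1500, 2048);	/* frag-attached */
	printf("len=%u frags=%u bias=%u offset=%u\n",
	       skb.len, skb.nr_frags, buf.pagecnt_bias, buf.page_offset);
	return 0;
}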
@@ -697,6 +706,7 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_buf_failed++;
+ rx_buf->pagecnt_bias++;
return NULL;
}
@@ -706,8 +716,23 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
}
/* pull page into skb */
- if (ice_add_rx_frag(rx_buf, skb, size)) {
+ ice_add_rx_frag(rx_buf, skb, size);
+
+ return skb;
+}
+
+/**
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
/* hand second half of page back to the ring */
+ if (ice_can_reuse_rx_page(rx_buf)) {
ice_reuse_rx_page(rx_ring, rx_buf);
rx_ring->rx_stats.page_reuse_count++;
} else {
@@ -719,8 +744,6 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* clear contents of buffer_info */
rx_buf->page = NULL;
-
- return skb;
}
/**
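ice_put_rx_buf() now concentrates the buffer's end-of-life decision in one place: a reusable page is handed back to the ring through ice_reuse_rx_page(), anything else has its DMA mapping destroyed and the page freed. A minimal model of that decision, with hypothetical names:

/* Minimal model of the recycle-or-free decision; "buf_state" and
 * "model_put_rx_buf" are hypothetical, not kernel symbols.
 */
enum buf_state { BUF_RECYCLED, BUF_FREED };

static enum buf_state model_put_rx_buf(int can_reuse)
{
	if (can_reuse) {
		/* ice_reuse_rx_page(): hand the page back to the ring
		 * at next_to_alloc and bump page_reuse_count
		 */
		return BUF_RECYCLED;
	}

	/* otherwise the DMA mapping is destroyed and the page freed,
	 * dropping the references the driver still held
	 */
	return BUF_FREED;
}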
@@ -1007,6 +1030,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (!skb)
break;
+ ice_put_rx_buf(rx_ring, rx_buf);
cleaned_count++;
/* skip if it is NOP desc */
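After this patch the per-descriptor loop in ice_clean_rx_irq() separates skb construction from buffer clean-up: ice_fetch_rx_buf() only builds or extends the skb, and ice_put_rx_buf() is called afterwards to recycle or free the buffer. A toy program sketching that call order; all types and helpers are stand-ins, not the kernel API:

#include <stdio.h>

struct ring_model { int recycled, freed; };
struct buf_model  { int reusable; };

/* models ice_fetch_rx_buf(): only produces the skb */
static const char *model_fetch(struct buf_model *b)
{
	return b ? "skb" : NULL;
}

/* models ice_put_rx_buf(): always runs after a successful fetch */
static void model_put(struct ring_model *r, struct buf_model *b)
{
	if (b->reusable)
		r->recycled++;
	else
		r->freed++;
}

int main(void)
{
	struct ring_model ring = { 0, 0 };
	struct buf_model bufs[3] = { { 1 }, { 0 }, { 1 } };
	int i;

	for (i = 0; i < 3; i++) {
		const char *skb = model_fetch(&bufs[i]);

		if (!skb)
			break;			/* alloc failure path */
		model_put(&ring, &bufs[i]);	/* clean-up, one call */
	}
	printf("recycled=%d freed=%d\n", ring.recycled, ring.freed);
	return 0;
}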