Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c'):
 drivers/net/ethernet/intel/ice/ice_txrx.c | 1373 +++++++++++++++++------------
 1 file changed, 823 insertions(+), 550 deletions(-)
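Several hunks below (the new ice_prgm_fdir_fltr() wait loop and the ice_maybe_stop_tx() checks) lean on the driver's ICE_DESC_UNUSED() ring accounting. The standalone C sketch that follows models that convention under the assumption that the macro uses the usual Intel one-slot-spare definition from ice_txrx.h; the struct and helper names here are illustrative only, not driver code.

#include <stdio.h>

/*
 * Standalone model of descriptor-ring free-entry accounting. One slot is
 * always left unused so that next_to_use == next_to_clean means "empty".
 * This is the convention the patch waits on when programming a Flow
 * Director filter (two free descriptors needed) and when deciding whether
 * to stop a Tx queue.
 */
struct ring_model {
	unsigned int count;          /* number of descriptors in the ring */
	unsigned int next_to_use;    /* producer index, driver writes here */
	unsigned int next_to_clean;  /* consumer index, completions cleaned */
};

static unsigned int ring_unused(const struct ring_model *r)
{
	/* wrap-aware distance from producer to consumer, minus the spare slot */
	return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring_model r = { .count = 256, .next_to_use = 250, .next_to_clean = 10 };

	/* producer wrapped behind the consumer: 256 + 10 - 250 - 1 = 15 free */
	printf("unused = %u\n", ring_unused(&r));

	r.next_to_use = 10;  /* empty ring: count - 1 descriptors usable */
	printf("unused = %u\n", ring_unused(&r));
	return 0;
}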
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4de61dbedd36..dbe80e5053a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -3,28 +3,119 @@
/* The driver transmit and receive code */
-#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
+#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
+#include "ice_eswitch.h"
#define ICE_RX_HDR_SIZE 256
+#define FDIR_DESC_RXDID 0x40
+#define ICE_FDIR_CLEAN_DELAY 10
+
+/**
+ * ice_prgm_fdir_fltr - Program a Flow Director filter
+ * @vsi: VSI to send dummy packet
+ * @fdir_desc: flow director descriptor
+ * @raw_packet: allocated buffer for flow director
+ */
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet)
+{
+ struct ice_tx_buf *tx_buf, *first;
+ struct ice_fltr_desc *f_desc;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_ring *tx_ring;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd;
+ u16 i;
+
+ /* VSI and Tx ring */
+ if (!vsi)
+ return -ENOENT;
+ tx_ring = vsi->tx_rings[0];
+ if (!tx_ring || !tx_ring->desc)
+ return -ENOENT;
+ dev = tx_ring->dev;
+
+ /* we are using two descriptors to add/del a filter and we can wait */
+ for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
+ msleep_interruptible(1);
+ }
+
+ dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma))
+ return -EINVAL;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ first = &tx_ring->tx_buf[i];
+ f_desc = ICE_TX_FDIRDESC(tx_ring, i);
+ memcpy(f_desc, fdir_desc, sizeof(*f_desc));
+
+ i++;
+ i = (i < tx_ring->count) ? i : 0;
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_buf[i];
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
+ ICE_TX_DESC_CMD_RE;
+
+ tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->raw_buf = raw_packet;
+
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
+
+ /* Force memory write to complete before letting h/w know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ /* mark the data descriptor to be watched */
+ first->next_to_watch = tx_desc;
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+
+ return 0;
+}
+
/**
* ice_unmap_and_free_tx_buf - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buf: the buffer to free
*/
static void
-ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
+ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- if (ice_ring_is_xdp(ring))
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ else if (ice_ring_is_xdp(ring))
page_frag_free(tx_buf->raw_buf);
else
dev_kfree_skb_any(tx_buf->skb);
@@ -46,7 +137,7 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
/* tx_buf must be completely set up in the transmit path */
}
-static struct netdev_queue *txring_txq(const struct ice_ring *ring)
+static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
@@ -55,11 +146,12 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
-void ice_clean_tx_ring(struct ice_ring *tx_ring)
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
u16 i;
- if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
ice_xsk_clean_xdp_ring(tx_ring);
goto tx_skip_free;
}
@@ -75,11 +167,15 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
/* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ memset(tx_ring->desc, 0, size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
+ tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -94,14 +190,18 @@ tx_skip_free:
*
* Free all transmit software resources
*/
-void ice_free_tx_ring(struct ice_ring *tx_ring)
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
+
ice_clean_tx_ring(tx_ring);
devm_kfree(tx_ring->dev, tx_ring->tx_buf);
tx_ring->tx_buf = NULL;
if (tx_ring->desc) {
- dmam_free_coherent(tx_ring->dev, tx_ring->size,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -114,7 +214,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
+static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = ICE_DFLT_IRQ_WORK;
@@ -123,6 +223,9 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
+ /* get the bql data ready */
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
i -= tx_ring->count;
@@ -136,8 +239,12 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
if (!eop_desc)
break;
+ /* follow the guidelines of other drivers */
+ prefetchw(&tx_buf->skb->users);
+
smp_rmb(); /* prevent any other reads prior to eop_desc */
+ ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
@@ -150,11 +257,8 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- if (ice_ring_is_xdp(tx_ring))
- page_frag_free(tx_buf->raw_buf);
- else
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -168,6 +272,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
i++;
@@ -186,6 +291,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
dma_unmap_len_set(tx_buf, len, 0);
}
}
+ ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
/* move us one more past the eop_desc for start of next pkt */
tx_buf++;
@@ -207,12 +313,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
- netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
- total_bytes);
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
@@ -221,11 +322,9 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->q_index) &&
- !test_bit(__ICE_DOWN, vsi->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->q_index);
+ if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
+ !test_bit(ICE_VSI_DOWN, vsi->state)) {
+ netif_tx_wake_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
}
}
@@ -239,9 +338,10 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
*
* Return 0 on success, negative on error
*/
-int ice_setup_tx_ring(struct ice_ring *tx_ring)
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -249,19 +349,19 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf);
tx_ring->tx_buf =
- devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
+ devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
GFP_KERNEL);
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest page */
- tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- PAGE_SIZE);
- tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
- tx_ring->size);
+ size);
goto err;
}
@@ -280,16 +380,22 @@ err:
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
-void ice_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
u16 i;
/* ring already cleared, nothing to do */
if (!rx_ring->rx_buf)
return;
- if (rx_ring->xsk_umem) {
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
+ if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
@@ -298,10 +404,6 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
- if (rx_buf->skb) {
- dev_kfree_skb(rx_buf->skb);
- rx_buf->skb = NULL;
- }
if (!rx_buf->page)
continue;
@@ -323,10 +425,15 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
}
rx_skip_free:
- memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+ if (rx_ring->xsk_pool)
+ memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+ else
+ memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
/* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ memset(rx_ring->desc, 0, size);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
@@ -339,18 +446,27 @@ rx_skip_free:
*
* Free all receive software resources
*/
-void ice_free_rx_ring(struct ice_ring *rx_ring)
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ u32 size;
+
ice_clean_rx_ring(rx_ring);
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
- devm_kfree(rx_ring->dev, rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ } else {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ }
if (rx_ring->desc) {
- dmam_free_coherent(rx_ring->dev, rx_ring->size,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(rx_ring->dev, size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -362,9 +478,10 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
*
* Return 0 on success, negative on error
*/
-int ice_setup_rx_ring(struct ice_ring *rx_ring)
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -372,19 +489,18 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
- GFP_KERNEL);
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
/* round up to nearest page */
- rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
- PAGE_SIZE);
- rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
- rx_ring->size);
+ size);
goto err;
}
@@ -397,30 +513,30 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
if (rx_ring->vsi->type == ICE_VSI_PF &&
!xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->q_index))
+ rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
goto err;
return 0;
err:
- devm_kfree(dev, rx_ring->rx_buf);
+ kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;
}
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
+ unsigned int truesize;
- return 0;
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
}
/**
@@ -428,41 +544,45 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
- int err, result = ICE_XDP_PASS;
- struct ice_ring *xdp_ring;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- break;
+ return ICE_XDP_PASS;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- break;
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
+ err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+ if (err == ICE_XDP_CONSUMED)
+ goto out_failure;
+ return err;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
- break;
+ if (err)
+ goto out_failure;
+ return ICE_XDP_REDIR;
default:
- bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
- result = ICE_XDP_CONSUMED;
- break;
+ return ICE_XDP_CONSUMED;
}
-
- return result;
}
/**
@@ -472,8 +592,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
* @frames: XDP frames to be transmitted
* @flags: transmit flags
*
- * Returns number of frames successfully sent. Frames that fail are
- * free'ed via XDP return API.
+ * Returns number of frames successfully sent. Failed frames
+ * will be free'ed by XDP core.
* For error cases, a negative errno code is returned and no-frames
* are transmitted (caller must handle freeing frames).
*/
@@ -484,34 +604,46 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct ice_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *xdp_ring;
- int drops = 0, i;
+ struct ice_tx_ring *xdp_ring;
+ int nxmit = 0, i;
- if (test_bit(__ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- xdp_ring = vsi->xdp_rings[queue_index];
+ if (static_branch_unlikely(&ice_xdp_locking_key)) {
+ queue_index %= vsi->num_xdp_txq;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ spin_lock(&xdp_ring->tx_lock);
+ } else {
+ /* Generally, should not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ }
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
- if (err != ICE_XDP_TX) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err != ICE_XDP_TX)
+ break;
+ nxmit++;
}
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
- return n - drops;
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+
+ return nxmit;
}
/**
@@ -523,16 +655,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
* reused.
*/
static bool
-ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
+ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
+ if (likely(page))
return true;
- }
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -556,7 +686,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
bi->dma = dma;
bi->page = page;
- bi->page_offset = ice_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -576,14 +706,15 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
+ if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
+ !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
@@ -628,15 +759,6 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
}
/**
- * ice_page_is_reserved - check if reuse is possible
- * @page: page struct to check
- */
-static bool ice_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
-/**
* ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
* @rx_buf: Rx buffer to adjust
* @size: Size of adjustment
@@ -661,24 +783,26 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
- /* avoid re-using remote pages */
- if (unlikely(ice_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -711,11 +835,11 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
@@ -737,7 +861,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* Synchronizes page for reuse by the adapter
*/
static void
-ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -761,21 +885,26 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
/**
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @skb: skb to be used
* @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size)
+ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buf->page);
+#else
+ 0;
+#endif
prefetchw(rx_buf->page);
- *skb = rx_buf->skb;
if (!size)
return rx_buf;
@@ -800,10 +929,10 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *
-ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
+ u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
@@ -818,12 +947,9 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -855,21 +981,20 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ ICE_RX_HDR_SIZE + metasize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -881,8 +1006,13 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -911,14 +1041,17 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ u16 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
@@ -927,10 +1060,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf)) {
+ if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -941,29 +1073,24 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
/* clear contents of buffer_info */
rx_buf->page = NULL;
- rx_buf->skb = NULL;
}
/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
*
* If the buffer is an EOP buffer, this function exits returning false,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -981,26 +1108,38 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* Returns amount of work completed
*/
-static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
+ struct ice_tx_ring *xdp_ring = NULL;
unsigned int xdp_res, xdp_xmit = 0;
+ struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp;
bool failure;
- xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (xdp_prog)
+ xdp_ring = rx_ring->xdp_ring;
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
+ unsigned char *hard_start;
unsigned int size;
u16 stat_err_bits;
+ int rx_buf_pgcnt;
u16 vlan_tag = 0;
- u8 rx_ptype;
+ u16 rx_ptype;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
@@ -1011,7 +1150,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
* hardware wrote DD then it will be non-zero
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -1020,11 +1159,23 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ ice_trace(clean_rx_irq, rx_ring, rx_desc);
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
+ ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
+ ice_put_rx_buf(rx_ring, NULL, 0);
+ cleaned_count++;
+ continue;
+ }
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1034,33 +1185,23 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
goto construct_skb;
}
- xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
- xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
- xdp.data_meta = xdp.data;
- xdp.data_end = xdp.data + size;
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depend on len size */
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+#endif
- rcu_read_lock();
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (!xdp_prog) {
- rcu_read_unlock();
+ if (!xdp_prog)
goto construct_skb;
- }
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
- rcu_read_unlock();
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
- size);
-#endif
xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
} else {
rx_buf->pagecnt_bias++;
}
@@ -1068,7 +1209,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
@@ -1087,22 +1228,21 @@ construct_skb:
break;
}
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc, skb))
+ if (ice_is_non_eop(rx_ring, rx_desc))
continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits))) {
dev_kfree_skb_any(skb);
continue;
}
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb)) {
@@ -1119,8 +1259,10 @@ construct_skb:
ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_pkts++;
@@ -1130,7 +1272,8 @@ construct_skb:
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
if (xdp_prog)
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
+ rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1138,218 +1281,68 @@ construct_skb:
return failure ? budget : (int)total_rx_pkts;
}
-/**
- * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
- * @port_info: port_info structure containing the current link speed
- * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
- * @itr: ITR value to update
- *
- * Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
- * link speed.
- *
- * The following is a calculation derived from:
- * wmem_default / (size + overhead) = desired_pkts_per_int
- * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
- * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
- *
- * Assuming wmem_default is 212992 and overhead is 640 bytes per
- * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
- * formula down to:
- *
- *        wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
- * ITR = -------------------------------------------- * --------------
- *                           rate                        pkt_size + 640
- */
-static unsigned int
-ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
- unsigned int avg_pkt_size,
- unsigned int itr)
+static void __ice_update_sample(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc,
+ struct dim_sample *sample,
+ bool is_tx)
{
- switch (port_info->phy.link_info.link_speed) {
- case ICE_AQ_LINK_SPEED_100GB:
- itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- /* fall through */
- default:
- itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- }
+ u64 packets = 0, bytes = 0;
+
+ if (is_tx) {
+ struct ice_tx_ring *tx_ring;
- if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
- itr &= ICE_ITR_ADAPTIVE_LATENCY;
- itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ ice_for_each_tx_ring(tx_ring, *rc) {
+ packets += tx_ring->stats.pkts;
+ bytes += tx_ring->stats.bytes;
+ }
+ } else {
+ struct ice_rx_ring *rx_ring;
+
+ ice_for_each_rx_ring(rx_ring, *rc) {
+ packets += rx_ring->stats.pkts;
+ bytes += rx_ring->stats.bytes;
+ }
}
- return itr;
+ dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ sample->comp_ctr = 0;
+
+ /* if dim settings get stale, like when not updated for 1
+ * second or longer, force it to start again. This addresses the
+ * frequent case of an idle queue being switched to by the
+ * scheduler. The 1,000 here means 1,000 milliseconds.
+ */
+ if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
+ rc->dim.state = DIM_START_MEASURE;
}
/**
- * ice_update_itr - update the adaptive ITR value based on statistics
- * @q_vector: structure containing interrupt and ring information
- * @rc: structure containing ring performance data
+ * ice_net_dim - Update net DIM algorithm
+ * @q_vector: the vector associated with the interrupt
+ *
+ * Create a DIM sample and notify net_dim() so that it can possibly decide
+ * a new ITR value based on incoming packets, bytes, and interrupts.
*
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
+ * This function is a no-op if the ring is not configured to dynamic ITR.
*/
-static void
-ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
+static void ice_net_dim(struct ice_q_vector *q_vector)
{
- unsigned long next_update = jiffies;
- unsigned int packets, bytes, itr;
- bool container_is_rx;
+ struct ice_ring_container *tx = &q_vector->tx;
+ struct ice_ring_container *rx = &q_vector->rx;
- if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
- return;
+ if (ITR_IS_DYNAMIC(tx)) {
+ struct dim_sample dim_sample;
- /* If itr_countdown is set it means we programmed an ITR within
- * the last 4 interrupt cycles. This has a side effect of us
- * potentially firing an early interrupt. In order to work around
- * this we need to throw out any data received for a few
- * interrupts following the update.
- */
- if (q_vector->itr_countdown) {
- itr = rc->target_itr;
- goto clear_counts;
+ __ice_update_sample(q_vector, tx, &dim_sample, true);
+ net_dim(&tx->dim, dim_sample);
}
- container_is_rx = (&q_vector->rx == rc);
- /* For Rx we want to push the delay up and default to low latency.
- * for Tx we want to pull the delay down and default to high latency.
- */
- itr = container_is_rx ?
- ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
- ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
-
- /* If we didn't update within up to 1 - 2 jiffies we can assume
- * that either packets are coming in so slow there hasn't been
- * any work, or that there is so much work that NAPI is dealing
- * with interrupt moderation and we don't need to do anything.
- */
- if (time_after(next_update, rc->next_update))
- goto clear_counts;
-
- prefetch(q_vector->vsi->port_info);
-
- packets = rc->total_pkts;
- bytes = rc->total_bytes;
+ if (ITR_IS_DYNAMIC(rx)) {
+ struct dim_sample dim_sample;
- if (container_is_rx) {
- /* If Rx there are 1 to 4 packets and bytes are less than
- * 9000 assume insufficient data to use bulk rate limiting
- * approach unless Tx is already in bulk rate limiting. We
- * are likely latency driven.
- */
- if (packets && packets < 4 && bytes < 9000 &&
- (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
- itr = ICE_ITR_ADAPTIVE_LATENCY;
- goto adjust_by_size_and_speed;
- }
- } else if (packets < 4) {
- /* If we have Tx and Rx ITR maxed and Tx ITR is running in
- * bulk mode and we are receiving 4 or fewer packets just
- * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
- * that the Rx can relax.
- */
- if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
- (q_vector->rx.target_itr & ICE_ITR_MASK) ==
- ICE_ITR_ADAPTIVE_MAX_USECS)
- goto clear_counts;
- } else if (packets > 32) {
- /* If we have processed over 32 packets in a single interrupt
- * for Tx assume we need to switch over to "bulk" mode.
- */
- rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
- }
-
- /* We have no packets to actually measure against. This means
- * either one of the other queues on this vector is active or
- * we are a Tx queue doing TSO with too high of an interrupt rate.
- *
- * Between 4 and 56 we can assume that our current interrupt delay
- * is only slightly too low. As such we should increase it by a small
- * fixed amount.
- */
- if (packets < 56) {
- itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
- if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
- itr &= ICE_ITR_ADAPTIVE_LATENCY;
- itr += ICE_ITR_ADAPTIVE_MAX_USECS;
- }
- goto clear_counts;
+ __ice_update_sample(q_vector, rx, &dim_sample, false);
+ net_dim(&rx->dim, dim_sample);
}
-
- if (packets <= 256) {
- itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
- itr &= ICE_ITR_MASK;
-
- /* Between 56 and 112 is our "goldilocks" zone where we are
- * working out "just right". Just report that our current
- * ITR is good for us.
- */
- if (packets <= 112)
- goto clear_counts;
-
- /* If packet count is 128 or greater we are likely looking
- * at a slight overrun of the delay we want. Try halving
- * our delay to see if that will cut the number of packets
- * in half per interrupt.
- */
- itr >>= 1;
- itr &= ICE_ITR_MASK;
- if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
- itr = ICE_ITR_ADAPTIVE_MIN_USECS;
-
- goto clear_counts;
- }
-
- /* The paths below assume we are dealing with a bulk ITR since
- * number of packets is greater than 256. We are just going to have
- * to compute a value and try to bring the count under control,
- * though for smaller packet sizes there isn't much we can do as
- * NAPI polling will likely be kicking in sooner rather than later.
- */
- itr = ICE_ITR_ADAPTIVE_BULK;
-
-adjust_by_size_and_speed:
-
- /* based on checks above packets cannot be 0 so division is safe */
- itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
- bytes / packets, itr);
-
-clear_counts:
- /* write back value */
- rc->target_itr = itr;
-
- /* next update should occur within next jiffy */
- rc->next_update = next_update + 1;
-
- rc->total_bytes = 0;
- rc->total_pkts = 0;
}
/**
@@ -1373,85 +1366,45 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
-/* The act of updating the ITR will cause it to immediately trigger. In order
- * to prevent this from throwing off adaptive update statistics we defer the
- * update so that it can only happen so often. So after either Tx or Rx are
- * updated we make the adaptive scheme wait until either the ITR completely
- * expires via the next_update expiration or we have been through at least
- * 3 interrupts.
- */
-#define ITR_COUNTDOWN_START 3
-
/**
- * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
- * @q_vector: q_vector for which ITR is being updated and interrupt enabled
+ * ice_enable_interrupt - re-enable MSI-X interrupt
+ * @q_vector: the vector associated with the interrupt to enable
+ *
+ * If the VSI is down, the interrupt will not be re-enabled. Also,
+ * when enabling the interrupt always reset the wb_on_itr to false
+ * and trigger a software interrupt to clean out internal state.
*/
-static void ice_update_ena_itr(struct ice_q_vector *q_vector)
+static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
- struct ice_ring_container *tx = &q_vector->tx;
- struct ice_ring_container *rx = &q_vector->rx;
struct ice_vsi *vsi = q_vector->vsi;
+ bool wb_en = q_vector->wb_on_itr;
u32 itr_val;
- /* when exiting WB_ON_ITR lets set a low ITR value and trigger
- * interrupts to expire right away in case we have more work ready to go
- * already
- */
- if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
- itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
- /* set target back to last user set value */
- rx->target_itr = rx->itr_setting;
- /* set current to what we just wrote and dynamic if needed */
- rx->current_itr = ICE_WB_ON_ITR_USECS |
- (rx->itr_setting & ICE_ITR_DYNAMIC);
- /* allow normal interrupt flow to start */
- q_vector->itr_countdown = 0;
+ if (test_bit(ICE_DOWN, vsi->state))
return;
- }
- /* This will do nothing if dynamic updates are not enabled */
- ice_update_itr(q_vector, tx);
- ice_update_itr(q_vector, rx);
-
- /* This block of logic allows us to get away with only updating
- * one ITR value with each interrupt. The idea is to perform a
- * pseudo-lazy update with the following criteria.
- *
- * 1. Rx is given higher priority than Tx if both are in same state
- * 2. If we must reduce an ITR that is given highest priority.
- * 3. We then give priority to increasing ITR based on amount.
+ /* trigger an ITR delayed software interrupt when exiting busy poll, to
+ * make sure to catch any pending cleanups that might have been missed
+ * due to interrupt state transition. If busy poll or poll isn't
+ * enabled, then don't update ITR, and just enable the interrupt.
*/
- if (rx->target_itr < rx->current_itr) {
- /* Rx ITR needs to be reduced, this is highest priority */
- itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
- rx->current_itr = rx->target_itr;
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- } else if ((tx->target_itr < tx->current_itr) ||
- ((rx->target_itr - rx->current_itr) <
- (tx->target_itr - tx->current_itr))) {
- /* Tx ITR needs to be reduced, this is second priority
- * Tx ITR needs to be increased more than Rx, fourth priority
- */
- itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
- tx->current_itr = tx->target_itr;
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- } else if (rx->current_itr != rx->target_itr) {
- /* Rx ITR needs to be increased, third priority */
- itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
- rx->current_itr = rx->target_itr;
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- } else {
- /* Still have to re-enable the interrupts */
+ if (!wb_en) {
itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
- if (q_vector->itr_countdown)
- q_vector->itr_countdown--;
- }
+ } else {
+ q_vector->wb_on_itr = false;
- if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
- wr32(&q_vector->vsi->back->hw,
- GLINT_DYN_CTL(q_vector->reg_idx),
- itr_val);
+ /* do two things here with a single write. Set up the third ITR
+ * index to be used for software interrupt moderation, and then
+ * trigger a software interrupt with a rate limit of 20K on
+ * software interrupts, this will help avoid high interrupt
+ * loads due to frequently polling and exiting polling.
+ */
+ itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
+ itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
+ ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
+ GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
+ }
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}
/**
@@ -1461,32 +1414,31 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
* We need to tell hardware to write-back completed descriptors even when
* interrupts are disabled. Descriptors will be written back on cache line
* boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
- * descriptors may not be written back if they don't fill a cache line until the
- * next interrupt.
+ * descriptors may not be written back if they don't fill a cache line until
+ * the next interrupt.
*
- * This sets the write-back frequency to 2 microseconds as that is the minimum
- * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
- * make sure hardware knows we aren't meddling with the INTENA_M bit.
+ * This sets the write-back frequency to whatever was set previously for the
+ * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
+ * aren't meddling with the INTENA_M bit.
*/
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
- /* already in WB_ON_ITR mode no need to change it */
- if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
+ /* already in wb_on_itr mode no need to change it */
+ if (q_vector->wb_on_itr)
return;
- if (q_vector->num_ring_rx)
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
- ICE_RX_ITR));
-
- if (q_vector->num_ring_tx)
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
- ICE_TX_ITR));
+ /* use previously set ITR values for all of the ITR indices by
+ * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
+ * be static in non-adaptive mode (user configured)
+ */
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
+ GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
- q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
+ q_vector->wb_on_itr = true;
}
/**
@@ -1502,18 +1454,24 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
{
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
bool clean_complete = true;
- struct ice_ring *ring;
int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
- ice_clean_tx_irq_zc(ring, budget) :
- ice_clean_tx_irq(ring, budget);
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ bool wd;
+
+ if (tx_ring->xsk_pool)
+ wd = ice_xmit_zc(tx_ring);
+ else if (ice_ring_is_xdp(tx_ring))
+ wd = true;
+ else
+ wd = ice_clean_tx_irq(tx_ring, budget);
if (!wd)
clean_complete = false;
@@ -1529,21 +1487,21 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* don't allow the budget to go below 1 because that would exit
* polling early.
*/
- budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
else
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
- ice_for_each_ring(ring, q_vector->rx) {
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
int cleaned;
/* A dedicated path for zero-copy allows making a single
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_umem ?
- ice_clean_rx_irq_zc(ring, budget_per_ring) :
- ice_clean_rx_irq(ring, budget_per_ring);
+ cleaned = rx_ring->xsk_pool ?
+ ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1551,16 +1509,23 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
}
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ /* Set the writeback on ITR so partial completions of
+ * cache-lines will still continue even if we're polling.
+ */
+ ice_set_wb_on_itr(q_vector);
return budget;
+ }
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
- ice_update_ena_itr(q_vector);
- else
+ if (napi_complete_done(napi, work_done)) {
+ ice_net_dim(q_vector);
+ ice_enable_interrupt(q_vector);
+ } else {
ice_set_wb_on_itr(q_vector);
+ }
return min_t(int, work_done, budget - 1);
}
@@ -1572,9 +1537,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*
* Returns -EBUSY if a stop is needed, else 0
*/
-static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
+ netif_tx_stop_queue(txring_txq(tx_ring));
/* Memory barrier before checking head and tail */
smp_mb();
@@ -1582,8 +1547,8 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
if (likely(ICE_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
- /* A reprieve! - use start_subqueue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_tx_start_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
return 0;
}
@@ -1595,7 +1560,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -1614,7 +1579,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
* it and the length into the transmit descriptor.
*/
static void
-ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct ice_tx_offload_params *off)
{
u64 td_offset, td_tag, td_cmd;
@@ -1625,6 +1590,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
struct sk_buff *skb;
skb_frag_t *frag;
dma_addr_t dma;
+ bool kick;
td_tag = off->td_l2tag1;
td_cmd = off->td_cmd;
@@ -1665,7 +1631,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
*/
while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, max_data, td_tag);
+ ice_build_ctob(td_cmd, td_offset, max_data,
+ td_tag);
tx_desc++;
i++;
@@ -1685,8 +1652,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
if (likely(!data_len))
break;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1705,9 +1672,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
tx_buf = &tx_ring->tx_buf[i];
}
- /* record bytecount for BQL */
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(first->skb);
@@ -1717,8 +1681,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* write last descriptor with RS and EOP bits */
td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
- td_tag);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, td_offset, size, td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
@@ -1736,7 +1700,10 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
+ netdev_xmit_more());
+ if (kick)
+ /* notify HW of packet */
writel(i, tx_ring->tail);
return;
@@ -1785,19 +1752,112 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol)) {
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- if (skb->encapsulation)
- return -1;
+ /* set the tx_flags to indicate the IP protocol type. this is
+ * required so that checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ else if (ip.v6->version == 6)
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+
+ if (skb->encapsulation) {
+ bool gso_ena = false;
+ u32 tunnel = 0;
+
+ /* define outer network header type */
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
+ tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
+ ICE_TX_CTX_EIPT_IPV4 :
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+ l4_proto = ip.v4->protocol;
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ int ret;
+
+ tunnel |= ICE_TX_CTX_EIPT_IPV6;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ ICE_TXD_CTX_QW0_EIPLEN_S;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ ICE_TXD_CTX_QW0_NATLEN_S;
+
+ gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
+ /* indicate if we need to offload outer UDP header */
+ if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
+
+ /* record tunnel offload values */
+ off->cd_tunnel_params |= tunnel;
+
+ /* set DTYP=1 to indicate that it's a Tx context descriptor
+ * in IPsec tunnel mode with Tx offloads in Quad word 1
+ */
+ off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+ }
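The outer-header bookkeeping above boils down to two lengths: the outer IP header in 4-byte words (EIPLEN) and the tunnel header, from the outer L4 header to the inner IP header, in 2-byte words (NATLEN). A stand-alone sketch of that math with an assumed untagged IPv4/VXLAN layout and placeholder shift positions rather than the real ICE_TXD_CTX_QW0_* values:

#include <stdint.h>
#include <stdio.h>

/* placeholder shift positions; the real ICE_TXD_CTX_QW0_* values differ */
#define DEMO_EIPLEN_S	2
#define DEMO_NATLEN_S	12

int main(void)
{
	/* assumed offsets within the frame */
	unsigned int outer_ip = 14;	/* after the Ethernet header          */
	unsigned int outer_l4 = 34;	/* outer UDP header (20-byte IPv4)    */
	unsigned int inner_ip = 50;	/* after 8-byte UDP + 8-byte VXLAN    */
	uint32_t tunnel = 0;

	/* outer L3 header size, counted in 4-byte words */
	tunnel |= ((outer_l4 - outer_ip) / 4) << DEMO_EIPLEN_S;

	/* tunnel header size (outer L4 start to inner IP), in 2-byte words */
	tunnel |= ((inner_ip - outer_l4) / 2) << DEMO_NATLEN_S;

	printf("tunnel field = 0x%08x\n", tunnel);
	return 0;
}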
/* Enable IP checksum offloads */
- protocol = vlan_get_protocol(skb);
- if (protocol == htons(ETH_P_IP)) {
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
@@ -1807,7 +1867,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
else
cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
- } else if (protocol == htons(ETH_P_IPV6)) {
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
@@ -1862,49 +1922,29 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
*
* Checks the skb and set up correspondingly several generic transmit flags
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns error code indicate the frame should be dropped upon error and the
- * otherwise returns 0 to indicate the flags has been set properly.
*/
-static int
-ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+static void
+ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
- __be16 protocol = skb->protocol;
-
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* when HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- return 0;
- }
- /* if we have a HW VLAN tag being added, default to the HW one */
+ /* nothing left to do, software offloaded VLAN */
+ if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
+ return;
+
+ /* the VLAN ethertype/tpid is determined by VSI configuration and netdev
+ * feature flags; the driver only allows either 802.1Q or 802.1ad
+ * VLAN offloads at a time, so we only care about the VLAN ID here
+ */
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
-
- /* for SW VLAN, check the next protocol and store the tag */
- vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
- sizeof(_vhdr),
- &_vhdr);
- if (!vhdr)
- return -EINVAL;
-
- first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
- ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
- return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
+ ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
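As a stand-alone illustration of the flag handling above: the 16-bit VLAN TCI is parked in the upper bits of tx_flags, and one flag bit records whether the hardware should insert it from L2TAG1 (data descriptor) or L2TAG2 (context descriptor). The DEMO_* bit positions below are assumptions, not the driver's real ICE_TX_FLAGS_* values:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* assumed layout: low bits are flags, the VLAN TCI lives in the top 16 bits */
#define DEMO_TX_FLAGS_HW_VLAN			(1u << 1)
#define DEMO_TX_FLAGS_HW_OUTER_SINGLE_VLAN	(1u << 2)
#define DEMO_TX_FLAGS_VLAN_S			16
#define DEMO_TX_FLAGS_VLAN_M			(0xffffu << DEMO_TX_FLAGS_VLAN_S)

static uint32_t demo_prepare_vlan(uint16_t vlan_tci, bool ring_uses_l2tag2)
{
	uint32_t tx_flags = (uint32_t)vlan_tci << DEMO_TX_FLAGS_VLAN_S;

	/* pick the descriptor field the HW should insert the tag from */
	tx_flags |= ring_uses_l2tag2 ? DEMO_TX_FLAGS_HW_OUTER_SINGLE_VLAN
				     : DEMO_TX_FLAGS_HW_VLAN;
	return tx_flags;
}

int main(void)
{
	uint32_t flags = demo_prepare_vlan(100, true);

	printf("flags 0x%08x, vid %u\n", flags,
	       (flags & DEMO_TX_FLAGS_VLAN_M) >> DEMO_TX_FLAGS_VLAN_S);
	return 0;
}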
/**
@@ -1929,7 +1969,9 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
- u32 paylen, l4_start;
+ __be16 protocol;
+ u32 paylen;
+ u8 l4_start;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1943,8 +1985,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
return err;
/* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -1954,8 +2001,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ip.v6->payload_len = 0;
}
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
+ /* determine offset of outer transport header */
+ l4_start = (u8)(l4.hdr - skb->data);
+
+ /* remove payload length from outer checksum */
+ paylen = skb->len - l4_start;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* reset pointers to inner headers */
+
+ /* cppcheck-suppress unreadVariable */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+ }
+
/* determine offset of transport header */
- l4_start = l4.hdr - skb->data;
+ l4_start = (u8)(l4.hdr - skb->data);
/* remove payload length from checksum */
paylen = skb->len - l4_start;
@@ -1964,12 +2045,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
- off->header_len = sizeof(l4.udp) + l4_start;
+ off->header_len = (u8)sizeof(l4.udp) + l4_start;
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
}
/* update gso_segs and bytecount */
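For context on header_len above: the hardware needs the total number of header bytes to replicate in front of every segment, i.e. everything up to and including the L4 header; for TCP the data-offset field counts 32-bit words. A tiny stand-alone example under an assumed untagged IPv4/TCP layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed untagged IPv4/TCP frame: 14-byte Ethernet + 20-byte IP */
	unsigned int l4_start = 14 + 20;
	uint8_t doff = 8;		/* TCP data offset, in 32-bit words */

	/* bytes the HW must prepend to every TSO segment */
	unsigned int header_len = l4_start + doff * 4;

	printf("TSO header length: %u bytes\n", header_len);	/* 66 */
	return 0;
}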
@@ -2092,10 +2173,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > ICE_MAX_DATA_PER_TXD) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (ICE_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > ICE_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2103,7 +2204,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
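The alignment handling above answers one question: how many bytes of an oversized stale fragment still sit in its last descriptor, given that DMA reads are split on the read-request boundary. A stand-alone model of that arithmetic; the DEMO_* constants mirror the usual 4 KB read-request size and 16 KB-1 per-descriptor limit but are assumptions here:

#include <stdio.h>

/* assumed limits, stand-ins for the ICE_MAX_* definitions in the driver */
#define DEMO_MAX_READ_REQ_SIZE		4096
#define DEMO_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define DEMO_MAX_DATA_PER_TXD_ALIGNED \
	(~(DEMO_MAX_READ_REQ_SIZE - 1) & DEMO_MAX_DATA_PER_TXD)

/* bytes left in the last descriptor of a large fragment */
static int demo_last_desc_bytes(int frag_off, int frag_size)
{
	if (frag_size <= DEMO_MAX_DATA_PER_TXD)
		return frag_size;

	/* strip the unaligned head, then whole aligned descriptors */
	frag_size -= -frag_off & (DEMO_MAX_READ_REQ_SIZE - 1);
	do {
		frag_size -= DEMO_MAX_DATA_PER_TXD_ALIGNED;
	} while (frag_size > DEMO_MAX_DATA_PER_TXD);

	return frag_size;
}

int main(void)
{
	printf("last descriptor carries %d bytes\n",
	       demo_last_desc_bytes(100, 40000));
	return 0;
}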
@@ -2132,6 +2233,43 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
}
/**
+ * ice_tstamp - set up context descriptor for hardware timestamp
+ * @tx_ring: pointer to the Tx ring to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @first: Tx buffer
+ * @off: Tx offload parameters
+ */
+static void
+ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
+ struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+ s8 idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return;
+
+ if (!tx_ring->ptp_tx)
+ return;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return;
+
+ /* Grab an open timestamp slot */
+ idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
+ if (idx < 0) {
+ tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
+ return;
+ }
+
+ off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
+ ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
+ first->tx_flags |= ICE_TX_FLAGS_TSYN;
+}
+
+/**
* ice_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -2139,14 +2277,17 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t
-ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
+ struct ethhdr *eth;
unsigned int count;
int tso, csum;
+ ice_trace(xmit_frame_ring, tx_ring, skb);
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
@@ -2167,6 +2308,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
return NETDEV_TX_BUSY;
}
+ /* prefetch the BQL data, which is infrequently used */
+ netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
+
offload.tx_ring = tx_ring;
/* record the location of the first descriptor for this packet */
@@ -2177,8 +2321,14 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
first->tx_flags = 0;
/* prepare the VLAN tagging flags for Tx */
- if (ice_tx_prepare_vlan_flags(tx_ring, first))
- goto out_drop;
+ ice_tx_prepare_vlan_flags(tx_ring, first);
+ if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_IL2TAG2 <<
+ ICE_TXD_CTX_QW1_CMD_S));
+ offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+ ICE_TX_FLAGS_VLAN_S;
+ }
/* set up TSO offload */
tso = ice_tso(first, &offload);
@@ -2191,16 +2341,22 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
goto out_drop;
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
vsi->type == ICE_VSI_PF &&
- vsi->port_info->is_sw_lldp))
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
+ ice_tstamp(tx_ring, skb, first, &offload);
+ if (ice_is_switchdev_running(vsi->back))
+ ice_eswitch_set_target_vsi(skb, &offload);
+
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
- int i = tx_ring->next_to_use;
+ u16 i = tx_ring->next_to_use;
/* grab the next descriptor */
cdesc = ICE_TX_CTX_DESC(tx_ring, i);
@@ -2218,6 +2374,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
return NETDEV_TX_OK;
out_drop:
+ ice_trace(xmit_frame_ring_drop, tx_ring, skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2233,7 +2390,7 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
tx_ring = vsi->tx_rings[skb->queue_mapping];
@@ -2245,3 +2402,119 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return ice_xmit_frame_ring(skb, tx_ring);
}
+
+/**
+ * ice_get_dscp_up - return the UP/TC value for a SKB
+ * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
+ * @skb: SKB to query for info to determine UP/TC
+ *
+ * This function should only be called when the PF is in L3 DSCP PFC mode
+ */
+static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
+{
+ u8 dscp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return dcbcfg->dscp_map[dscp];
+}
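ice_get_dscp_up() works because DSCP occupies the upper six bits of the IPv4 TOS / IPv6 traffic-class byte, so shifting the DS field right by two drops the ECN bits and leaves a 0-63 index into the map. A tiny stand-alone illustration; the 64-entry array is only a stand-in for dcbcfg->dscp_map:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dscp_map[64] = { 0 };	/* stand-in for dcbcfg->dscp_map */
	uint8_t dsfield = 0xb8;		/* TOS byte: DSCP 46 (EF), ECN 0 */
	uint8_t dscp = dsfield >> 2;	/* drop the two ECN bits */

	dscp_map[46] = 5;		/* assume DSCP 46 maps to UP/TC 5 */

	printf("DSCP %u -> UP %u\n", dscp, dscp_map[dscp]);
	return 0;
}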
+
+u16
+ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *dcbcfg;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
+ skb->priority = ice_get_dscp_up(dcbcfg, skb);
+
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
+/**
+ * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
+ * @tx_ring: tx_ring to clean
+ */
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
+{
+ struct ice_vsi *vsi = tx_ring->vsi;
+ s16 i = tx_ring->next_to_clean;
+ int budget = ICE_DFLT_IRQ_WORK;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+
+ tx_buf = &tx_ring->tx_buf[i];
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no pending work */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past filter desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap the data header */
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(tx_ring->dev, tx_buf->raw_buf);
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->raw_buf = NULL;
+ tx_buf->tx_flags = 0;
+ tx_buf->next_to_watch = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past eop_desc for start of next FD desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ /* re-enable interrupt if needed */
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
+}
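One pattern worth calling out in ice_clean_ctrl_tx_irq() above: the running index is biased by -count (i -= tx_ring->count before the loop, i += tx_ring->count after it), so the wrap-around check in the hot loop is a simple test for zero rather than a compare against the ring size. A stand-alone sketch of the same trick:

#include <stdio.h>

#define DEMO_RING_COUNT	8

int main(void)
{
	int next_to_clean = 5;
	/* bias the index so that wrap-around is detected by i reaching 0 */
	int i = next_to_clean - DEMO_RING_COUNT;
	int processed;

	for (processed = 0; processed < 6; processed++) {
		printf("cleaning slot %d\n", i + DEMO_RING_COUNT);

		/* advance; hitting zero means we wrapped past the last slot */
		i++;
		if (!i)
			i -= DEMO_RING_COUNT;
	}

	/* convert back to a normal [0, count) index */
	next_to_clean = i + DEMO_RING_COUNT;
	printf("next_to_clean = %d\n", next_to_clean);
	return 0;
}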