author	Jesse Brandeburg <jesse.brandeburg@intel.com>	2021-10-27 12:38:36 -0700
committer	Tony Nguyen <anthony.l.nguyen@intel.com>	2021-12-15 08:46:28 -0800
commit	cc14db11c8a40f930fc1e4b254a37e0fce745236 (patch)
tree	053e5a007c65310ad7a8e47c4af24fde75f0fa6c /drivers/net/ethernet/intel/ice/ice_txrx.c
parent	ice: update to newer kernel API (diff)
ice: use prefetch methods
The kernel provides some prefetch mechanisms to speed up commonly cold
cache line accesses during receive processing. Since these are software
structures it helps to have these strategically placed prefetches.

Be careful to call the BQL prefetch complete only for non-XDP queues.

Co-developed-by: Piotr Raczynski <piotr.raczynski@intel.com>
Signed-off-by: Piotr Raczynski <piotr.raczynski@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
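[Editor's note] For reference, the two BQL prefetch helpers this patch calls are provided by include/linux/netdevice.h. A minimal sketch of their shape (the real kernel versions carry kerneldoc; exact field layout is an assumption of this sketch):

static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	/* warm, with write intent, the cache line holding dql.num_queued,
	 * which the transmit enqueue path is about to update
	 */
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	/* warm the cache line holding dql.limit for the completion path */
	prefetchw(&dev_queue->dql.limit);
#endif
}

Both helpers compile to nothing when CONFIG_BQL is not set, so the only guard the driver needs is the ice_ring_is_xdp() check below: XDP Tx rings do not participate in BQL accounting, hence the "only for non-XDP queues" caution in the commit message.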
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.c	13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 12a2edd13877..de9247d45c39 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -3,8 +3,9 @@
 
 /* The driver transmit and receive code */
 
-#include <linux/prefetch.h>
 #include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/prefetch.h>
 #include <linux/bpf_trace.h>
 #include <net/dsfield.h>
 #include <net/xdp.h>
@@ -219,6 +220,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
 
+	/* get the bql data ready */
+	if (!ice_ring_is_xdp(tx_ring))
+		netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+
 	tx_buf = &tx_ring->tx_buf[i];
 	tx_desc = ICE_TX_DESC(tx_ring, i);
 	i -= tx_ring->count;
@@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 		if (!eop_desc)
 			break;
 
+		/* follow the guidelines of other drivers */
+		prefetchw(&tx_buf->skb->users);
+
 		smp_rmb();	/* prevent any other reads prior to eop_desc */
 
 		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
@@ -2265,6 +2273,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
 		return NETDEV_TX_BUSY;
 	}
 
+	/* prefetch for bql data which is infrequently used */
+	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
+
 	offload.tx_ring = tx_ring;
 
 	/* record the location of the first descriptor for this packet */
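
[Editor's note] On the prefetchw(&tx_buf->skb->users) hunk above: on architectures that do not supply their own implementation, the kernel's prefetch macros fall back to the GCC builtin. The generic definitions in include/linux/prefetch.h look roughly like this:

#ifndef ARCH_HAS_PREFETCH
#define prefetch(x)	__builtin_prefetch(x)
#endif

#ifndef ARCH_HAS_PREFETCHW
/* second builtin argument = 1 requests the cache line with write intent */
#define prefetchw(x)	__builtin_prefetch(x, 1)
#endif

Prefetching skb->users with write intent pulls that cache line into the CPU in an exclusive state before the refcount is dropped when the skb is freed later in the clean loop, hiding the latency of the cache-line ownership transfer.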