Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx_lib.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx_lib.c	| 174
1 file changed, 135 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 6da048a6ca7c..7ee38d02d1e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -1,14 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2019, Intel Corporation. */
 
+#include <linux/filter.h>
+
 #include "ice_txrx_lib.h"
+#include "ice_eswitch.h"
+#include "ice_lib.h"
 
 /**
  * ice_release_rx_desc - Store the new tail and head values
  * @rx_ring: ring to bump
  * @val: new head index
  */
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
 {
 	u16 prev_ntu = rx_ring->next_to_use & ~0x7;
 
@@ -38,10 +42,23 @@ void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
  * ice_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
- * Returns a hash type to be used by skb_set_hash
+ * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
+ * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
+ * Rx desc.
  */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
 {
+	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
+
+	if (!decoded.known)
+		return PKT_HASH_TYPE_NONE;
+	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+		return PKT_HASH_TYPE_L4;
+	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+		return PKT_HASH_TYPE_L3;
+	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
+		return PKT_HASH_TYPE_L2;
+
 	return PKT_HASH_TYPE_NONE;
 }
 
@@ -53,8 +70,8 @@ static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
  * @rx_ptype: the ptype value from the descriptor
  */
 static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
-	    struct sk_buff *skb, u8 rx_ptype)
+ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+	    struct sk_buff *skb, u16 rx_ptype)
 {
 	struct ice_32b_rx_flex_desc_nic *nic_mdid;
 	u32 hash;
@@ -80,15 +97,15 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
  * skb->protocol must be set before this function is called
  */
 static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
-	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
+	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
 {
 	struct ice_rx_ptype_decoded decoded;
-	u32 rx_error, rx_status;
+	u16 rx_status0, rx_status1;
 	bool ipv4, ipv6;
 
-	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
-	rx_error = rx_status;
+	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
 
 	decoded = ice_decode_rx_desc_ptype(ptype);
 
@@ -101,7 +118,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 		return;
 
 	/* check if HW has decoded the packet and checksum */
-	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
 		return;
 
 	if (!(decoded.known && decoded.outer_ip))
@@ -112,25 +129,38 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
 
-	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
-				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
 		goto checksum_fail;
-	else if (ipv6 && (rx_status &
-			  (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+
+	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
 		goto checksum_fail;
 
 	/* check for L4 errors and handle packets that were not able to be
 	 * checksummed due to arrival speed
 	 */
-	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
 		goto checksum_fail;
 
+	/* check for outer UDP checksum error in tunneled packets */
+	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+		goto checksum_fail;
+
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
+	 */
+	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;
+
 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
 	switch (decoded.inner_prot) {
 	case ICE_RX_PTYPE_INNER_PROT_TCP:
 	case ICE_RX_PTYPE_INNER_PROT_UDP:
 	case ICE_RX_PTYPE_INNER_PROT_SCTP:
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
 	default:
 		break;
 	}
@@ -152,9 +182,9 @@ checksum_fail:
  * other fields within the skb.
  */
 void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
 		       union ice_32b_rx_flex_desc *rx_desc,
-		       struct sk_buff *skb, u8 ptype)
+		       struct sk_buff *skb, u16 ptype)
 {
 	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
 
@@ -162,6 +192,9 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
 	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+
+	if (rx_ring->ptp_rx)
+		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
 }
 
 /**
@@ -174,27 +207,83 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
  * gro receive functions (with/without VLAN tag)
  */
 void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
 {
-	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-	    (vlan_tag & VLAN_VID_MASK))
+	netdev_features_t features = rx_ring->netdev->features;
+	bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+	else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+
 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
 }
 
 /**
+ * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ */
+static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+{
+	unsigned int total_bytes = 0, total_pkts = 0;
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+	u16 ntc = xdp_ring->next_to_clean;
+	struct ice_tx_desc *next_dd_desc;
+	u16 next_dd = xdp_ring->next_dd;
+	struct ice_tx_buf *tx_buf;
+	int i;
+
+	next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+	if (!(next_dd_desc->cmd_type_offset_bsz &
+	      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+		return;
+
+	for (i = 0; i < tx_thresh; i++) {
+		tx_buf = &xdp_ring->tx_buf[ntc];
+
+		total_bytes += tx_buf->bytecount;
+		/* normally tx_buf->gso_segs was taken but at this point
+		 * it's always 1 for us
+		 */
+		total_pkts++;
+
+		page_frag_free(tx_buf->raw_buf);
+		dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+				 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buf, len, 0);
+		tx_buf->raw_buf = NULL;
+
+		ntc++;
+		if (ntc >= xdp_ring->count)
+			ntc = 0;
+	}
+
+	next_dd_desc->cmd_type_offset_bsz = 0;
+	xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
+	if (xdp_ring->next_dd > xdp_ring->count)
+		xdp_ring->next_dd = tx_thresh - 1;
+	xdp_ring->next_to_clean = ntc;
+	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+}
+
+/**
  * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
  * @data: packet data pointer
  * @size: packet data size
  * @xdp_ring: XDP ring for transmission
  */
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
 {
+	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u16 i = xdp_ring->next_to_use;
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
 	dma_addr_t dma;
 
+	if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
+		ice_clean_xdp_irq(xdp_ring);
+
 	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
 		xdp_ring->tx_stats.tx_busy++;
 		return ICE_XDP_CONSUMED;
@@ -215,21 +304,27 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
 
 	tx_desc = ICE_TX_DESC(xdp_ring, i);
 	tx_desc->buf_addr = cpu_to_le64(dma);
-	tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
-						  size, 0);
-
-	/* Make certain all of the status bits have been updated
-	 * before next_to_watch is written.
-	 */
-	smp_wmb();
+	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
+						      size, 0);
+	xdp_ring->xdp_tx_active++;
 
 	i++;
-	if (i == xdp_ring->count)
+	if (i == xdp_ring->count) {
 		i = 0;
-
-	tx_buf->next_to_watch = tx_desc;
+		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+		tx_desc->cmd_type_offset_bsz |=
+			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+		xdp_ring->next_rs = tx_thresh - 1;
+	}
 	xdp_ring->next_to_use = i;
 
+	if (i > xdp_ring->next_rs) {
+		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+		tx_desc->cmd_type_offset_bsz |=
+			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+		xdp_ring->next_rs += tx_thresh;
+	}
+
 	return ICE_XDP_TX;
 }
 
@@ -240,9 +335,9 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
  *
  * Returns negative on failure, 0 on success.
  */
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
 {
-	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 
 	if (unlikely(!xdpf))
 		return ICE_XDP_CONSUMED;
@@ -252,22 +347,23 @@
 
 /**
  * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
- * @rx_ring: Rx ring
+ * @xdp_ring: XDP ring
  * @xdp_res: Result of the receive batch
  *
  * This function bumps XDP Tx tail and/or flush redirect map, and
  * should be called when a batch of packets has been processed in the
  * napi loop.
  */
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
 {
 	if (xdp_res & ICE_XDP_REDIR)
 		xdp_do_flush_map();
 
 	if (xdp_res & ICE_XDP_TX) {
-		struct ice_ring *xdp_ring =
-			rx_ring->vsi->xdp_rings[rx_ring->q_index];
-
+		if (static_branch_unlikely(&ice_xdp_locking_key))
+			spin_lock(&xdp_ring->tx_lock);
 		ice_xdp_ring_update_tail(xdp_ring);
+		if (static_branch_unlikely(&ice_xdp_locking_key))
+			spin_unlock(&xdp_ring->tx_lock);
 	}
 }
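The biggest behavioral change in this range is on the XDP Tx path: the per-descriptor next_to_watch/smp_wmb() completion tracking is removed in favor of batched bookkeeping, where ice_xmit_xdp_ring() sets the RS (report status) bit once per ring quarter and ice_clean_xdp_irq() reclaims a full quarter of descriptors once the descriptor at next_dd reports done. The stand-alone C program below is a minimal sketch of that index arithmetic only; the names (struct ring, xmit_one, clean_quarter) and the 64-entry ring size are illustrative assumptions, not the driver's API, and hardware DD write-back is faked by treating every descriptor that carried RS as instantly completed.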
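/*
 * User-space model of the quarter-threshold RS/DD bookkeeping above.
 * Illustrative sketch only: no DMA, no buffers, and completion is
 * simulated, whereas the real driver polls the DD bit written by HW
 * and bails out with tx_busy if the ring is truly full.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_COUNT   64u                /* descriptors in the ring */
#define RING_QUARTER (RING_COUNT / 4)   /* RS/cleaning threshold */

struct ring {
	unsigned int next_to_use;   /* producer index */
	unsigned int next_to_clean; /* consumer index */
	unsigned int next_rs;       /* descriptor that gets the next RS bit */
	unsigned int next_dd;       /* descriptor polled for completion */
	bool rs[RING_COUNT];        /* stand-in for the RS/DD descriptor bit */
};

/* free descriptors, same ring arithmetic as ICE_DESC_UNUSED */
static unsigned int ring_unused(const struct ring *r)
{
	return (r->next_to_clean > r->next_to_use ? 0 : RING_COUNT) +
	       r->next_to_clean - r->next_to_use - 1;
}

/* mirrors ice_clean_xdp_irq: reclaim one quarter if its RS/DD fired */
static void clean_quarter(struct ring *r)
{
	unsigned int i, ntc = r->next_to_clean;

	if (!r->rs[r->next_dd])		/* "DD not set yet" */
		return;

	for (i = 0; i < RING_QUARTER; i++) {
		/* the real driver unmaps DMA and frees the buffer here */
		ntc = (ntc + 1) % RING_COUNT;
	}

	r->rs[r->next_dd] = false;	/* analog of zeroing the descriptor */
	r->next_dd += RING_QUARTER;
	if (r->next_dd > RING_COUNT)
		r->next_dd = RING_QUARTER - 1;
	r->next_to_clean = ntc;
}

/* mirrors the ice_xmit_xdp_ring tail logic: one packet per descriptor */
static void xmit_one(struct ring *r)
{
	unsigned int i = r->next_to_use;

	if (ring_unused(r) < RING_QUARTER)
		clean_quarter(r);

	i++;
	if (i == RING_COUNT) {		/* wrapped: close the last quarter */
		i = 0;
		r->rs[r->next_rs] = true;
		r->next_rs = RING_QUARTER - 1;
	}
	r->next_to_use = i;

	if (i > r->next_rs) {		/* crossed a quarter boundary */
		r->rs[r->next_rs] = true;
		r->next_rs += RING_QUARTER;
	}
}

int main(void)
{
	struct ring r = {
		.next_rs = RING_QUARTER - 1,
		.next_dd = RING_QUARTER - 1,
	};
	unsigned int pkt;

	for (pkt = 0; pkt < 3 * RING_COUNT; pkt++)
		xmit_one(&r);

	printf("ntu=%u ntc=%u next_rs=%u next_dd=%u unused=%u\n",
	       r.next_to_use, r.next_to_clean, r.next_rs, r.next_dd,
	       ring_unused(&r));
	return 0;
}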
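Compared with the old scheme, descriptor status is inspected once per quarter rather than once per packet, and next_rs/next_dd advance in fixed quarter strides (wrapping back to tx_thresh - 1), which removes the per-packet barrier from the hot path. The tx_lock taken in ice_finalize_xdp_rx() under the ice_xdp_locking_key static branch covers the configuration where multiple Rx queues share one XDP Tx ring.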