Diffstat (limited to 'drivers/net/ethernet')
drivers/net/ethernet/amazon/ena/ena_netdev.c | 54
drivers/net/ethernet/atheros/alx/main.c | 1
drivers/net/ethernet/broadcom/bnx2.c | 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 146
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10
drivers/net/ethernet/cadence/macb_main.c | 3
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 48
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 14
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 9
drivers/net/ethernet/chelsio/cxgb4/sge.c | 6
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 46
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 80
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 2
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 6
drivers/net/ethernet/ec_bhf.c | 4
drivers/net/ethernet/emulex/benet/be_main.c | 1
drivers/net/ethernet/freescale/fec_main.c | 24
drivers/net/ethernet/freescale/fec_ptp.c | 8
drivers/net/ethernet/google/gve/gve_main.c | 21
drivers/net/ethernet/google/gve/gve_tx.c | 10
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 6
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 110
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 64
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 4
drivers/net/ethernet/intel/i40e/i40e_txrx.c | 7
drivers/net/ethernet/intel/i40e/i40e_xsk.c | 8
drivers/net/ethernet/intel/ice/ice.h | 8
drivers/net/ethernet/intel/ice/ice_ethtool.c | 51
drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 1
drivers/net/ethernet/intel/ice/ice_lib.c | 30
drivers/net/ethernet/intel/ice/ice_main.c | 15
drivers/net/ethernet/intel/ice/ice_txrx.c | 17
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 19
drivers/net/ethernet/intel/ice/ice_xsk.c | 11
drivers/net/ethernet/intel/igb/igb.h | 2
drivers/net/ethernet/intel/igb/igb_main.c | 55
drivers/net/ethernet/intel/igb/igb_ptp.c | 23
drivers/net/ethernet/intel/igc/igc_main.c | 11
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 16
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 16
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 14
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 3
drivers/net/ethernet/korina.c | 12
drivers/net/ethernet/lantiq_xrx200.c | 17
drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 22
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 54
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 4
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 77
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 24
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 4
drivers/net/ethernet/mellanox/mlx4/fw.c | 3
drivers/net/ethernet/mellanox/mlx4/fw.h | 1
drivers/net/ethernet/mellanox/mlx4/main.c | 6
drivers/net/ethernet/mellanox/mlx4/port.c | 107
drivers/net/ethernet/mellanox/mlx5/core/dev.c | 19
drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 2
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 1
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 22
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 2
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c | 15
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 8
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 35
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 12
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 114
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 39
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 25
drivers/net/ethernet/mellanox/mlx5/core/eq.c | 6
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 7
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 61
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 3
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 6
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 2
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h | 5
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 3
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h | 5
drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
drivers/net/ethernet/mellanox/mlx5/core/mr.c | 2
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 22
drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 3
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 1
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 18
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 3
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 26
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 9
drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 30
drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 6
drivers/net/ethernet/mellanox/mlxsw/reg.h | 2
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 5
drivers/net/ethernet/microchip/encx24j600.c | 2
drivers/net/ethernet/microchip/encx24j600_hw.h | 2
drivers/net/ethernet/mscc/ocelot.c | 5
drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 1
drivers/net/ethernet/pensando/Kconfig | 1
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 4
drivers/net/ethernet/qlogic/qla3xxx.c | 2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 18
drivers/net/ethernet/realtek/r8169_main.c | 2
drivers/net/ethernet/renesas/sh_eth.c | 2
drivers/net/ethernet/sfc/nic.c | 1
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 8
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 38
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
drivers/net/ethernet/ti/netcp_core.c | 4
drivers/net/ethernet/xilinx/ll_temac_main.c | 27
116 files changed, 1361 insertions(+), 643 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 881f88754bf6..52571486705e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
struct ena_tx_buffer *tx_info,
struct xdp_frame *xdpf,
- void **push_hdr,
- u32 *push_len)
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_adapter *adapter = xdp_ring->adapter;
struct ena_com_buf *ena_buf;
- dma_addr_t dma = 0;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
u32 size;
tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
size = tx_info->xdpf->len;
- ena_buf = tx_info->bufs;
- /* llq push buffer */
- *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
- *push_hdr = tx_info->xdpf->data;
+ if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
- if (size - *push_len > 0) {
+ if (size > 0) {
dma = dma_map_single(xdp_ring->dev,
- *push_hdr + *push_len,
- size - *push_len,
+ data,
+ size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
goto error_report_dma_error;
- tx_info->map_linear_data = 1;
- tx_info->num_of_bufs = 1;
- }
+ tx_info->map_linear_data = 0;
- ena_buf->paddr = dma;
- ena_buf->len = size;
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
return 0;
@@ -274,10 +286,6 @@ error_report_dma_error:
&xdp_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
- xdp_return_frame_rx_napi(tx_info->xdpf);
- tx_info->xdpf = NULL;
- tx_info->num_of_bufs = 0;
-
return -EINVAL;
}
@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info;
u16 next_to_use, req_id;
- void *push_hdr;
- u32 push_len;
int rc;
next_to_use = xdp_ring->next_to_use;
@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
tx_info = &xdp_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
+ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
return rc;
- ena_tx_ctx.ena_bufs = tx_info->bufs;
- ena_tx_ctx.push_header = push_hdr;
- ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
ena_tx_ctx.req_id = req_id;
- ena_tx_ctx.header_len = push_len;
rc = ena_xmit_common(dev,
xdp_ring,
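
The ena_xdp_tx_map_frame() rework above moves the LLQ push-header decision into the mapping routine: a header is only carved off when the ring uses ENA_ADMIN_PLACEMENT_POLICY_DEV, and whatever remains is DMA-mapped as a single buffer. A standalone C sketch of that split arithmetic (hypothetical names, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's tx context. */
struct tx_ctx {
	const uint8_t *push_header;
	uint32_t header_len;
};

/* Split a frame: up to max_hdr bytes are pushed inline (LLQ), the
 * remainder is left for a separately DMA-mapped buffer. */
static size_t llq_split(struct tx_ctx *ctx, const uint8_t *data,
			size_t len, size_t max_hdr, int use_llq)
{
	size_t push = 0;

	if (use_llq) {
		push = len < max_hdr ? len : max_hdr;
		ctx->push_header = data;
	}
	ctx->header_len = (uint32_t)push;
	return len - push;	/* bytes that still need mapping */
}

int main(void)
{
	struct tx_ctx ctx = { 0 };
	uint8_t frame[300] = { 0 };
	size_t rest = llq_split(&ctx, frame, sizeof(frame), 96, 1);

	printf("push %u bytes, map %zu bytes\n",
	       (unsigned)ctx.header_len, rest);
	return 0;
}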
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index b3d74332ed33..7748b276e5fd 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1849,6 +1849,7 @@ out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
+ pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index c0986096c701..5bace8a93d73 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
-
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+ rc = -EPERM;
goto err_out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index d21f085044cd..27943b0446c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
/* SR-IOV capability was enabled but there are no VFs*/
- if (iov->total == 0)
+ if (iov->total == 0) {
+ err = -EINVAL;
goto failed;
+ }
iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2985844634c8..aef3fccc27a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
- idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+ idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -6932,17 +6933,10 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
- u8 pg_size = 0;
-
if (!rmem->nr_pages)
return;
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
-
- *pg_attr = pg_size;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
@@ -7314,7 +7308,7 @@ skip_rdma:
entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -10785,37 +10779,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+ u8 **nextp)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ int hdr_count = 0;
+ u8 *nexthdr;
+ int start;
+
+ /* Check that there are at most 2 IPv6 extension headers, no
+ * fragment header, and each is <= 64 bytes.
+ */
+ start = nw_off + sizeof(*ip6h);
+ nexthdr = &ip6h->nexthdr;
+ while (ipv6_ext_hdr(*nexthdr)) {
+ struct ipv6_opt_hdr *hp;
+ int hdrlen;
+
+ if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+ *nexthdr == NEXTHDR_FRAGMENT)
+ return false;
+ hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+ skb_headlen(skb), NULL);
+ if (!hp)
+ return false;
+ if (*nexthdr == NEXTHDR_AUTH)
+ hdrlen = ipv6_authlen(hp);
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ if (hdrlen > 64)
+ return false;
+ nexthdr = &hp->nexthdr;
+ start += hdrlen;
+ hdr_count++;
+ }
+ if (nextp) {
+ /* Caller will check inner protocol */
+ if (skb->encapsulation) {
+ *nextp = nexthdr;
+ return true;
+ }
+ *nextp = NULL;
+ }
+ /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+ return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ __be16 udp_port = uh->dest;
+
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ return false;
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ struct ethhdr *eh = inner_eth_hdr(skb);
+
+ switch (eh->h_proto) {
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ return bnxt_exthdr_check(bp, skb,
+ skb_inner_network_offset(skb),
+ NULL);
+ }
+ }
+ return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ return bnxt_udp_tunl_check(bp, skb);
+ case IPPROTO_IPIP:
+ return true;
+ case IPPROTO_GRE: {
+ switch (skb->inner_protocol) {
+ default:
+ return false;
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ fallthrough;
+ }
+ }
+ case IPPROTO_IPV6:
+ /* Check ext headers of inner ipv6 */
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
+ }
+ return false;
+}
+
static netdev_features_t bnxt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- struct bnxt *bp;
- __be16 udp_port;
- u8 l4_proto = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u8 *l4_proto;
features = vlan_features_check(skb, features);
- if (!skb->encapsulation)
- return features;
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
- l4_proto = ip_hdr(skb)->protocol;
+ if (!skb->encapsulation)
+ return features;
+ l4_proto = &ip_hdr(skb)->protocol;
+ if (bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
case htons(ETH_P_IPV6):
- l4_proto = ipv6_hdr(skb)->nexthdr;
+ if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+ &l4_proto))
+ break;
+ if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
- default:
- return features;
}
-
- if (l4_proto != IPPROTO_UDP)
- return features;
-
- bp = netdev_priv(dev);
- /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
- udp_port = udp_hdr(skb)->dest;
- if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
- return features;
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
@@ -11668,6 +11750,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
bnxt_hwrm_coal_params_qcaps(bp);
}
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
static int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -11682,6 +11766,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
netdev_err(bp->dev, "Firmware init phase 2 failed\n");
return rc;
}
+ rc = bnxt_probe_phy(bp, false);
+ if (rc)
+ return rc;
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
@@ -13073,6 +13160,7 @@ init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
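
bnxt_exthdr_check() above walks the IPv6 extension-header chain and refuses offload for fragment headers, overlong headers, or chains deeper than the hardware can parse. A compilable userspace sketch of the same walk (hypothetical names; the AH-specific length rule from ipv6_authlen() is left out for brevity):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define NEXTHDR_FRAGMENT 44
#define NEXTHDR_NONE     59

/* Generic extension header: next-header byte plus length in 8-octet
 * units, not counting the first 8 octets (RFC 8200). */
struct opt_hdr {
	uint8_t nexthdr;
	uint8_t hdrlen;
};

static bool is_ext_hdr(uint8_t nh)
{
	/* hop-by-hop, routing, fragment, AH, none, destination opts */
	return nh == 0 || nh == 43 || nh == 44 || nh == 51 ||
	       nh == 59 || nh == 60;
}

/* Walk the chain at buf[off]; reject fragment headers, more than
 * three headers, or any header longer than 64 bytes, then require
 * TCP or UDP as the upper protocol. */
static bool exthdr_ok(const uint8_t *buf, size_t len, size_t off,
		      uint8_t nh)
{
	int count = 0;

	while (is_ext_hdr(nh)) {
		const struct opt_hdr *hp;
		size_t hdrlen;

		if (count >= 3 || nh == NEXTHDR_NONE || nh == NEXTHDR_FRAGMENT)
			return false;
		if (off + sizeof(*hp) > len)
			return false;
		hp = (const struct opt_hdr *)(buf + off);
		hdrlen = ((size_t)hp->hdrlen + 1) * 8;	/* ipv6_optlen() */
		if (hdrlen > 64)
			return false;
		nh = hp->nexthdr;
		off += hdrlen;
		count++;
	}
	return nh == 6 || nh == 17;	/* IPPROTO_TCP || IPPROTO_UDP */
}

int main(void)
{
	/* one 8-byte hop-by-hop header, then TCP */
	uint8_t pkt[8] = { 6 /* next: TCP */, 0 /* len: 8 octets */ };

	return exthdr_ok(pkt, sizeof(pkt), 0, 0 /* hop-by-hop */) ? 0 : 1;
}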
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98e0cef4532c..30e47ea343f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,16 @@ struct bnxt_ctx_pg_info {
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
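
BNXT_SET_CTX_PAGE_ATTR() selects the firmware's backing-store page-size encoding from the compile-time BNXT_PAGE_SIZE; the code it replaces compared BNXT_PAGE_SIZE (bytes) against 16, so the 64K encoding could never be chosen. The same selection as a plain function, with 0/1/2 standing in for the real FUNC_BACKING_STORE_CFG_REQ_*_PG_SIZE_PG_{4K,8K,64K} values (illustrative only):

static unsigned int ctx_page_attr(unsigned long page_size)
{
	if (page_size == 0x2000)	/* 8 KiB kernel pages */
		return 1;
	if (page_size == 0x10000)	/* 64 KiB kernel pages */
		return 2;
	return 0;			/* default: 4 KiB */
}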
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6bc7d41d519b..a0c7b1167dbb 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2867,6 +2867,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->dev->stats;
+ if (!netif_running(bp->dev))
+ return nstat;
+
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 314f8d806723..9058f09f921e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 61ea3ec5c3fc..83ed10ac8660 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
return ret;
}
- spin_lock_bh(&adap->win0_lock);
+ /* We have to RESET the chip/firmware because we need the
+ * chip in uninitialized state for loading new PHY image.
+ * Otherwise, the running firmware will only store the PHY
+ * image in local RAM which will be lost after next reset.
+ */
+ ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+ ret);
+ return ret;
+ }
+
ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
- spin_unlock_bh(&adap->win0_lock);
- if (ret)
- dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+ ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
u32 ftid)
{
struct tid_info *t = &adap->tids;
- struct filter_entry *f;
- if (ftid < t->nhpftids)
- f = &adap->tids.hpftid_tab[ftid];
- else if (ftid < t->nftids)
- f = &adap->tids.ftid_tab[ftid - t->nhpftids];
- else
- f = lookup_tid(&adap->tids, ftid);
+ if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+ return &t->hpftid_tab[ftid - t->hpftid_base];
- return f;
+ if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+ return &t->ftid_tab[ftid - t->ftid_base];
+
+ return lookup_tid(t, ftid);
}
static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
filter_id = filter_info->loc_array[cmd->fs.location];
f = cxgb4_get_filter_entry(adapter, filter_id);
+ if (f->fs.prio)
+ filter_id -= adapter->tids.hpftid_base;
+ else if (!f->fs.hash)
+ filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
if (ret)
goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
filter_info = &adapter->ethtool_filters->port[pi->port_id];
+ if (fs.prio)
+ tid += adapter->tids.hpftid_base;
+ else if (!fs.hash)
+ tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
filter_info->loc_array[cmd->fs.location] = tid;
set_bit(cmd->fs.location, filter_info->bmap);
filter_info->in_use++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index bc581b149b11..6260b3bebd2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
- (u64)f->fs.nat_lip[0] << 25, 1);
+ (u64)f->fs.nat_lip[0] << 24, 1);
}
}
@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
cxgb4_del_filter(dev, f->tid, &f->fs);
}
- sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ sb = adapter->tids.stid_base;
for (i = 0; i < sb; i++) {
f = (struct filter_entry *)adapter->tids.tid_tab[i];
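
The one-character fix in set_nat_params() above: the NAT local-IP bytes are packed one per byte lane, so f->fs.nat_lip[0] belongs at bit 24, not 25. The packing in isolation (standalone C):

#include <stdint.h>
#include <stdio.h>

/* Pack ip[0..3] into one 32-bit word, one byte per lane; shifting
 * ip[0] by 25 instead of 24 corrupts the top two byte lanes. */
static uint32_t pack_lip(const uint8_t ip[4])
{
	return (uint32_t)ip[3] | (uint32_t)ip[2] << 8 |
	       (uint32_t)ip[1] << 16 | (uint32_t)ip[0] << 24;
}

int main(void)
{
	uint8_t ip[4] = { 192, 168, 1, 2 };

	printf("0x%08x\n", (unsigned)pack_lip(ip));	/* 0xc0a80102 */
	return 0;
}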
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6264bc66a4fc..762113a04dde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
return err;
}
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
/* Load PHY Firmware onto adapter.
*/
- spin_lock_bh(&adap->win0_lock);
ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
(u8 *)phyf->data, phyf->size);
- spin_unlock_bh(&adap->win0_lock);
if (ret < 0)
dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
-ret);
@@ -6480,9 +6478,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
direction);
- cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
mutex_unlock(&uld_mutex);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 1b88bd1c2dbe..dd9be229819a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
if (!ch_flower)
return -ENOENT;
+ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+ adap->flower_ht_params);
+
ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
&ch_flower->fs, ch_flower->filter_id);
if (ret)
- goto err;
+ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+ ch_flower->filter_id, ret);
- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
- adap->flower_ht_params);
- if (ret) {
- netdev_err(dev, "Flow remove from rhashtable failed");
- goto err;
- }
kfree_rcu(ch_flower, rcu);
-
-err:
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 6c259de96f96..338b04f339b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
* down before configuring tc params.
*/
if (netif_running(dev)) {
- cxgb_close(dev);
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
needs_bring_up = true;
}
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
}
out:
- if (needs_bring_up)
- cxgb_open(dev);
+ if (needs_bring_up) {
+ netif_tx_start_all_queues(dev);
+ netif_carrier_on(dev);
+ }
mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e5f2edb70cf..6a099cb34b12 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
if (!eosw_txq)
return -ENOMEM;
+ if (!(adap->flags & CXGB4_FW_OK)) {
+ /* Don't stall caller when access to FW is lost */
+ complete(&eosw_txq->completion);
+ return -EIO;
+ }
+
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9428ef1f04a8..a0555f4d76fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
* @addr: the start address to write
* @n: length of data to write in bytes
* @data: the data to write
+ * @byte_oriented: whether to store data as bytes or as words
*
* Writes up to a page of data (256 bytes) to the serial flash starting
* at the given address. All the data must be written to the same page.
+ * If @byte_oriented is set the write data is stored as byte stream
+ * (i.e. matches what on disk), otherwise in big-endian.
*/
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
- unsigned int n, const u8 *data)
+ unsigned int n, const u8 *data, bool byte_oriented)
{
- int ret;
- u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
+ u32 buf[64];
+ int ret;
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
goto unlock;
- for (left = n; left; left -= c) {
+ for (left = n; left; left -= c, data += c) {
c = min(left, 4U);
- for (val = 0, i = 0; i < c; ++i)
- val = (val << 8) + *data++;
+ for (val = 0, i = 0; i < c; ++i) {
+ if (byte_oriented)
+ val = (val << 8) + data[i];
+ else
+ val = (val << 8) + data[c - i - 1];
+ }
ret = sf1_write(adapter, c, c != left, 1, val);
if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
- ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+ byte_oriented);
if (ret)
return ret;
@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
if (ret)
goto out;
@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
if (ret)
goto out;
}
- ret = t4_write_flash(adap,
- fw_start + offsetof(struct fw_hdr, fw_ver),
- sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+ ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+ true);
out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
/* Copy the supplied PHY Firmware image to the adapter memory location
* allocated by the adapter firmware.
*/
+ spin_lock_bh(&adap->win0_lock);
ret = t4_memory_rw(adap, win, mtype, maddr,
phy_fw_size, (__be32 *)phy_fw_data,
T4_MEMORY_WRITE);
+ spin_unlock_bh(&adap->win0_lock);
if (ret)
return ret;
@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
n = size - i;
else
n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, true);
if (ret)
goto out;
@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
boot_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+ false);
if (ret)
goto out;
}
ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
- (const u8 *)header);
+ (const u8 *)header, false);
out:
if (ret)
@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < size; i += SF_PAGE_SIZE) {
n = min_t(u32, size - i, SF_PAGE_SIZE);
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, false);
if (ret)
goto out;
@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < npad; i++) {
u8 data = 0;
- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+ ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+ false);
if (ret)
goto out;
}
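
t4_write_flash() now takes byte_oriented so callers can choose whether the 32-bit words fed to the flash preserve the on-disk byte stream or are stored big-endian. The two packing orders side by side, mirroring the two loop variants (standalone C):

#include <stdint.h>
#include <stdio.h>

/* Pack up to 4 bytes into the word handed to the flash controller.
 * byte_oriented keeps the on-disk byte order; otherwise the bytes
 * are reversed so the stored word is big-endian. */
static uint32_t pack_word(const uint8_t *data, unsigned int c,
			  int byte_oriented)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < c; i++)
		val = (val << 8) + (byte_oriented ? data[i] : data[c - i - 1]);
	return val;
}

int main(void)
{
	uint8_t d[4] = { 0x11, 0x22, 0x33, 0x44 };

	printf("stream: 0x%08x  word: 0x%08x\n",
	       (unsigned)pack_word(d, 4, 1), (unsigned)pack_word(d, 4, 0));
	/* stream: 0x11223344  word: 0x44332211 */
	return 0;
}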
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index ef3f1e92632f..59683f79959c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
}
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+static void clear_conn_resources(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
* @tx_info - driver specific tls info.
@@ -364,10 +365,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_uld_ctx *u_ctx;
if (!tx_info)
return;
+ u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx && u_ctx->detach)
+ return;
/* clear l2t entry */
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
@@ -384,6 +389,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->tid != -1) {
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
}
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -411,6 +418,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
struct adapter *adap;
@@ -425,6 +433,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_open);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
@@ -434,6 +443,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (tx_ctx->chcr_info)
goto out;
+ if (u_ctx && u_ctx->detach)
+ goto out;
+
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
if (!tx_info)
goto out;
@@ -569,6 +581,8 @@ free_tid:
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
+
put_module:
/* release module refcount */
module_put(THIS_MODULE);
@@ -633,8 +647,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
+ struct tls_context *tls_ctx;
struct tid_info *t;
+ int ret = 0;
tid = GET_TID(p);
status = AOPEN_STATUS_G(ntohl(p->atid_status));
@@ -666,14 +684,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+ /* Adding tid */
+ tls_ctx = tls_get_ctx(tx_info->sk);
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx) {
+ ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
+ GFP_NOWAIT);
+ if (ret < 0) {
+ pr_err("%s: Failed to allocate tid XA entry = %d\n",
+ __func__, tx_info->tid);
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+ goto out;
+ }
+ }
tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
} else {
tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+out:
spin_unlock(&tx_info->lock);
complete(&tx_info->completion);
- return 0;
+ return ret;
}
/*
@@ -2090,6 +2123,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
goto out;
}
u_ctx->lldi = *lldi;
+ u_ctx->detach = false;
+ xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
out:
return u_ctx;
}
@@ -2123,6 +2158,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+static void clear_conn_resources(struct chcr_ktls_info *tx_info)
+{
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+
+ /* clear tid */
+ if (tx_info->tid != -1)
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+}
+
+static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
+{
+ struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ unsigned long index;
+
+ xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
+ tx_info = tx_ctx->chcr_info;
+ clear_conn_resources(tx_info);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
+ }
+}
+
static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct chcr_ktls_uld_ctx *u_ctx = handle;
@@ -2139,7 +2213,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_DETACH:
pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
mutex_lock(&dev_mutex);
+ u_ctx->detach = true;
list_del(&u_ctx->entry);
+ ch_ktls_reset_all_conn(u_ctx);
+ xa_destroy(&u_ctx->tid_list);
mutex_unlock(&dev_mutex);
break;
default:
@@ -2178,6 +2255,7 @@ static void __exit chcr_ktls_exit(void)
adap = pci_get_drvdata(u_ctx->lldi.pdev);
memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
list_del(&u_ctx->entry);
+ xa_destroy(&u_ctx->tid_list);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
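
The chcr_ktls changes hang every offloaded connection off an XArray keyed by tid so that CXGB4_STATE_DETACH can walk and release them all. A minimal kernel-style sketch of that lifecycle (hypothetical struct conn; kernel context assumed, not standalone-buildable):

#include <linux/xarray.h>

struct conn { int tid; /* ... */ };

static DEFINE_XARRAY_FLAGS(tid_list, XA_FLAGS_LOCK_BH);

/* On a successful CPL_ACT_OPEN_RPL: remember the connection by tid.
 * xa_insert_bh() fails with -EBUSY if the tid is already present. */
static int conn_track(struct conn *c)
{
	return xa_insert_bh(&tid_list, c->tid, c, GFP_NOWAIT);
}

/* On detach: tear down every tracked connection, then the array. */
static void conn_reset_all(void)
{
	struct conn *c;
	unsigned long index;

	xa_for_each(&tid_list, index, c)
		xa_erase(&tid_list, index);
	xa_destroy(&tid_list);
}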
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 18b3b1f02415..10572dc55365 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
+ struct xarray tid_list;
+ bool detach;
};
static inline struct chcr_ktls_ofld_ctx_tx *
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 188d871f6b8c..c320cc8ca68d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1564,8 +1564,10 @@ found_ok_skb:
cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
sizeof(thdr->type), &thdr->type);
- if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
- return -EIO;
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
+ copied = -EIO;
+ break;
+ }
/* don't send tls header, skip copy */
goto skip_copy;
}
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46b0dbab8aad..7c992172933b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
struct ec_bhf_priv *priv = netdev_priv(net_dev);
unregister_netdev(net_dev);
- free_netdev(net_dev);
pci_iounmap(dev, priv->dma_io);
pci_iounmap(dev, priv->io);
+
+ free_netdev(net_dev);
+
pci_release_regions(dev);
pci_clear_master(dev);
pci_disable_device(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b6eba29d8e99..7968568bbe21 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5897,6 +5897,7 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
+ pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f2065f9d02e6..ad82cffc6f3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1662,7 +1662,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
}
/* ------------------------------------------------------------------------- */
-static void fec_get_mac(struct net_device *ndev)
+static int fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -1685,6 +1685,8 @@ static void fec_get_mac(struct net_device *ndev)
ret = of_get_mac_address(np, tmpaddr);
if (!ret)
iap = tmpaddr;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
}
}
@@ -1723,7 +1725,7 @@ static void fec_get_mac(struct net_device *ndev)
eth_hw_addr_random(ndev);
dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
ndev->dev_addr);
- return;
+ return 0;
}
memcpy(ndev->dev_addr, iap, ETH_ALEN);
@@ -1731,6 +1733,8 @@ static void fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+
+ return 0;
}
/* ------------------------------------------------------------------------- */
@@ -3290,7 +3294,9 @@ static int fec_enet_init(struct net_device *ndev)
return ret;
}
- fec_enet_alloc_queue(ndev);
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
@@ -3298,11 +3304,15 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_queue_mem;
}
/* Get the Ethernet address */
- fec_get_mac(ndev);
+ ret = fec_get_mac(ndev);
+ if (ret)
+ goto free_queue_mem;
+
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
@@ -3376,6 +3386,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1753807cbf97..d71eac7e1924 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
- if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ if (!fep->cycle_speed) {
+ fep->cycle_speed = NSEC_PER_SEC;
+ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+ }
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
spin_lock_init(&fep->tmreg_lock);
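
fec_ptp_init() now guards against clk_get_rate() reporting zero, which would make the subsequent NSEC_PER_SEC / fep->cycle_speed divide by zero. The fallback in isolation (standalone C):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

/* A zero clock rate would fault the division; fall back to 1 GHz so
 * the per-cycle increment computes to 1 ns. */
static unsigned long ptp_inc(unsigned long cycle_speed)
{
	if (!cycle_speed)
		cycle_speed = NSEC_PER_SEC;
	return NSEC_PER_SEC / cycle_speed;
}

int main(void)
{
	printf("%lu %lu\n", ptp_inc(125000000), ptp_inc(0));	/* 8 1 */
	return 0;
}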
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7302498c6df3..bbc423e93122 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
/* Double check we have no extra work.
* Ensure unmask synchronizes with checking for work.
*/
- dma_rmb();
+ mb();
if (block->tx)
reschedule |= gve_tx_poll(block, -1);
if (block->rx)
@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int vecs_left = new_num_ntfy_blks % 2;
priv->num_ntfy_blks = new_num_ntfy_blks;
+ priv->mgmt_msix_idx = priv->num_ntfy_blks;
priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
vecs_per_type);
priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
@@ -300,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (priv->msix_vectors) {
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
}
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
priv->ntfy_blocks = NULL;
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 6938f3a939d6..3e04a3973d68 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -212,10 +212,11 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
-
+ if (!tx->tx_fifo.qpl)
+ goto abort_with_desc;
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
- goto abort_with_desc;
+ goto abort_with_qpl;
}
tx->q_resources =
@@ -236,6 +237,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_qpl:
+ if (!tx->raw_addressing)
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -589,7 +593,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
struct gve_tx_ring *tx;
int nsegs;
- WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
+ WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
if (unlikely(gve_maybe_stop_tx(tx, skb))) {
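
The gve_tx change above is a classic off-by-one in a bounds warning: with num_queues rings the last valid index is num_queues - 1, so the out-of-range test must be >=. In isolation (standalone C):

#include <stdio.h>

/* For an array of n queues the valid indices are 0..n-1; testing
 * `idx > n` lets idx == n slip through and index past the end. */
static int queue_index_ok(unsigned int idx, unsigned int n)
{
	return idx < n;
}

int main(void)
{
	printf("%d %d\n", queue_index_ok(3, 4), queue_index_ok(4, 4));
	/* 1 0 */
	return 0;
}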
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index da48c05435ea..7e62dcff2426 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -192,7 +192,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
}
/**
- *hns_nic_set_link_settings - implement ethtool set link ksettings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
*@cmd: ethtool_link_ksettings
*retuen 0 - success , negative --fail
@@ -827,7 +827,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
}
/**
- * get_ethtool_stats - get detail statistics.
+ * hns_get_ethtool_stats - get detail statistics.
* @netdev: net device
* @stats: statistics info.
* @data: statistics data.
@@ -885,7 +885,7 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
}
/**
- * get_strings: Return a set of strings that describe the requested objects
+ * hns_get_strings: Return a set of strings that describe the requested objects
* @netdev: net device
* @stringset: string set ID.
* @data: objects data.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 783fdaf8f8d6..026558f8e04b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -264,22 +264,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
- /* initialize the configuration for interrupt coalescing.
- * 1. GL (Interrupt Gap Limiter)
- * 2. RL (Interrupt Rate Limiter)
- * 3. QL (Interrupt Quantity Limiter)
- *
- * Default: enable interrupt coalescing self-adaptive and GL
- */
- tx_coal->adapt_enable = 1;
- rx_coal->adapt_enable = 1;
+ tx_coal->adapt_enable = ptx_coal->adapt_enable;
+ rx_coal->adapt_enable = prx_coal->adapt_enable;
- tx_coal->int_gl = HNS3_INT_GL_50K;
- rx_coal->int_gl = HNS3_INT_GL_50K;
+ tx_coal->int_gl = ptx_coal->int_gl;
+ rx_coal->int_gl = prx_coal->int_gl;
- rx_coal->flow_level = HNS3_FLOW_LOW;
- tx_coal->flow_level = HNS3_FLOW_LOW;
+ rx_coal->flow_level = prx_coal->flow_level;
+ tx_coal->flow_level = ptx_coal->flow_level;
/* device version above V3(include V3), GL can configure 1us
* unit, so uses 1us unit.
@@ -294,8 +289,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
rx_coal->ql_enable = 1;
tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
- tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
- rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ tx_coal->int_ql = ptx_coal->int_ql;
+ rx_coal->int_ql = prx_coal->int_ql;
}
}
@@ -846,8 +841,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
l4.udp->dest == htons(4790))))
return false;
- skb_checksum_help(skb);
-
return true;
}
@@ -924,8 +917,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -970,7 +962,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
- break;
+ return skb_checksum_help(skb);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -995,8 +987,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
return 0;
@@ -3844,6 +3835,34 @@ map_ring_fail:
return ret;
}
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
+ */
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
+
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
+}
+
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -4295,6 +4314,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_get_ring_cfg;
}
+ hns3_nic_init_coal_cfg(priv);
+
ret = hns3_nic_alloc_vector_data(priv);
if (ret) {
ret = -ENOMEM;
@@ -4317,12 +4338,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ret)
goto out_init_phy;
- ret = register_netdev(netdev);
- if (ret) {
- dev_err(priv->dev, "probe register netdev fail!\n");
- goto out_reg_netdev_fail;
- }
-
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
@@ -4355,17 +4370,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
if (netif_msg_drv(handle))
hns3_info_show(priv);
return ret;
+out_reg_netdev_fail:
+ hns3_dbg_uninit(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
out_init_irq_fail:
- unregister_netdev(netdev);
-out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
@@ -4571,31 +4592,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
return 0;
}
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
- /* ethtool only support setting and querying one coal
- * configuration for now, so save the vector 0' coal
- * configuration here in order to restore it.
- */
- memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
- sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
- u16 vector_num = priv->vector_num;
- int i;
-
- for (i = 0; i < vector_num; i++) {
- memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
- sizeof(struct hns3_enet_coalesce));
- }
-}
-
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4654,8 +4650,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_put_ring;
- hns3_restore_coal(priv);
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_dealloc_vector;
@@ -4721,8 +4715,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_uninit_vector_data(priv);
- hns3_store_coal(priv);
-
hns3_nic_dealloc_vector_data(priv);
hns3_uninit_all_ring(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b48faf769b1c..c1ea403d2b56 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1134,50 +1134,32 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
- struct ethtool_coalesce *cmd)
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
{
- struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
struct hnae3_handle *h = priv->ae_handle;
- u16 queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev))
return -EBUSY;
- if (queue >= queue_num) {
- netdev_err(netdev,
- "Invalid queue value %u! Queue max id=%u\n",
- queue, queue_num - 1);
- return -EINVAL;
- }
-
- tx_vector = priv->ring[queue].tqp_vector;
- rx_vector = priv->ring[queue_num + queue].tqp_vector;
+ cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
- cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.adapt_enable;
- cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.adapt_enable;
-
- cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
- cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+ cmd->tx_coalesce_usecs = tx_coal->int_gl;
+ cmd->rx_coalesce_usecs = rx_coal->int_gl;
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
- cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
- cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+ cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+ cmd->rx_max_coalesced_frames = rx_coal->int_ql;
return 0;
}
-static int hns3_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
-{
- return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1292,19 +1274,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
- ret = hns3_check_ql_coalesce_param(netdev, cmd);
- if (ret)
- return ret;
-
- if (cmd->use_adaptive_tx_coalesce == 1 ||
- cmd->use_adaptive_rx_coalesce == 1) {
- netdev_info(netdev,
- "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
- cmd->use_adaptive_tx_coalesce,
- cmd->use_adaptive_rx_coalesce);
- }
-
- return 0;
+ return hns3_check_ql_coalesce_param(netdev, cmd);
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1320,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
u16 queue_num = h->kinfo.num_tqps;
int ret;
int i;
@@ -1364,6 +1337,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
h->kinfo.int_rl_setting =
hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_coal->int_gl = cmd->tx_coalesce_usecs;
+ rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+ tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+ rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
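
The hns3 change above caches the coalesce parameters once per device (priv->tx_coal / priv->rx_coal) and fans them out to every queue on set, so get no longer needs to peek at queue 0's vectors. A minimal standalone sketch of that cache-then-fan-out pattern, with illustrative names rather than the hns3 API:

    #include <stddef.h>
    #include <stdint.h>

    struct coal { uint32_t usecs; uint32_t max_frames; uint8_t adaptive; };

    struct nic {
            struct coal dev_coal;       /* single device-wide copy          */
            struct coal queue_coal[8];  /* per-queue shadow (nqueues <= 8)  */
            size_t nqueues;
    };

    /* "get" reads the cached copy; no queue is consulted */
    static struct coal nic_get_coalesce(const struct nic *n)
    {
            return n->dev_coal;
    }

    /* "set" updates the cache first, then programs every queue from it */
    static void nic_set_coalesce(struct nic *n, struct coal c)
    {
            n->dev_coal = c;
            for (size_t i = 0; i < n->nqueues; i++)
                    n->queue_coal[i] = n->dev_coal;
    }
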
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 8e5f9dc8791d..f1c9f4ada348 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -710,7 +710,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
unsigned int flag;
int ret = 0;
- memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +737,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
trace_hclge_pf_mbx_get(hdev, req);
+ /* clear the resp_msg before processing each mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
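
Moving the memset() inside the while loop matters because resp_msg is reused for every request in the queue: without the per-iteration clear, fields set while answering one message could leak into the reply to the next. A tiny sketch of the pattern:

    #include <string.h>

    struct resp { int code; unsigned char data[8]; };

    static void process(int idx, struct resp *r) { r->code = idx; } /* stub handler */

    static void handle_all(int nreq)
    {
            struct resp resp_msg;
            int i;

            for (i = 0; i < nreq; i++) {
                    /* per-message clear, not a one-time init before the loop */
                    memset(&resp_msg, 0, sizeof(resp_msg));
                    process(i, &resp_msg);
            }
    }
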
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index de70c16ef619..b883ab809df3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = I40E_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
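
This hunk introduces a pattern repeated across the Intel drivers below (i40e_xsk, ice, igb, igc, ixgbe, ixgbevf): a failed XDP_TX or XDP_REDIRECT now jumps to a shared out_failure label inside the switch, so trace_xdp_exception() fires for every failure path rather than only for XDP_ABORTED. A self-contained sketch of the control flow, with stubbed helpers standing in for the driver and BPF calls:

    enum act { ACT_PASS, ACT_TX, ACT_REDIRECT, ACT_ABORTED, ACT_DROP };
    enum res { RES_PASS, RES_TX, RES_REDIR, RES_CONSUMED };

    static enum res xmit_on_tx_ring(void) { return RES_CONSUMED; } /* e.g. ring full     */
    static int do_redirect(void)          { return -1; }           /* e.g. no map entry  */
    static void trace_exception(void)     { }                      /* tracepoint stub    */

    static enum res run_xdp(enum act act)
    {
            enum res result;

            switch (act) {
            case ACT_PASS:
                    return RES_PASS;
            case ACT_TX:
                    result = xmit_on_tx_ring();
                    if (result == RES_CONSUMED)
                            goto out_failure;   /* failed TX is an exception too */
                    return result;
            case ACT_REDIRECT:
                    if (do_redirect())
                            goto out_failure;   /* so is a failed redirect */
                    return RES_REDIR;
            default:                            /* unknown verdict */
            case ACT_ABORTED:
    out_failure:
                    trace_exception();
                    /* fall through: aborts and failures drop the packet */
            case ACT_DROP:
                    return RES_CONSUMED;
            }
    }
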
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 46d884417c63..68f177a86403 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return I40E_XDP_REDIR;
}
switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index e35db3ff583b..2924c67567b8 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -335,6 +335,7 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_ring **xdp_rings; /* XDP ring array */
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -547,15 +548,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
+ struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
- qid -= ring->vsi->num_xdp_txq;
+ qid -= vsi->num_xdp_txq;
- if (!ice_is_xdp_ena_vsi(ring->vsi))
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
- return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
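
The new af_xdp_zc_qps bitmap lets ice_xsk_pool() answer "is this queue in zero-copy mode?" with a single bit test, instead of trusting xsk_get_pool_from_qid() alone, which can return a pool for a qid that is merely configured rather than actually running zero-copy. A sketch of the idea using a plain 64-bit word (illustrative, not the kernel bitmap API; callers guarantee q < MAX_QPS):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_QPS 64

    struct vsi { uint64_t af_xdp_zc_qps; };  /* bit q set => queue q runs ZC */

    static void zc_enable(struct vsi *v, unsigned int q)
    {
            v->af_xdp_zc_qps |= UINT64_C(1) << q;    /* pool enable path  */
    }

    static void zc_disable(struct vsi *v, unsigned int q)
    {
            v->af_xdp_zc_qps &= ~(UINT64_C(1) << q); /* pool disable path */
    }

    /* fast path: consult the pool registry only when the bit says ZC */
    static bool queue_uses_zc(const struct vsi *v, unsigned int q)
    {
            return q < MAX_QPS && (v->af_xdp_zc_qps & (UINT64_C(1) << q));
    }
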
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index d9ddd0bcf65f..99301ad95290 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
-
- /* Autoneg PHY types */
- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ks->base.port = PORT_TP;
break;
case ICE_MEDIA_BACKPLANE:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Backplane);
ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ /* Set supported and advertised autoneg */
+ if (ice_is_phy_caps_an_enabled(caps)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ }
+
done:
kfree(caps);
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index de38a0fc9665..9b8300d4a267 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -31,6 +31,7 @@
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 82e2ce23df3d..27f9dac8719c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
+ vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_zc_qps;
+
return 0;
+err_zc_qps:
+ devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -194,6 +200,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
+ if (vf->num_req_qs)
+ vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +296,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
+ if (vsi->af_xdp_zc_qps) {
+ bitmap_free(vsi->af_xdp_zc_qps);
+ vsi->af_xdp_zc_qps = NULL;
+ }
/* free the ring and vector containers */
if (vsi->q_vectors) {
devm_kfree(dev, vsi->q_vectors);
@@ -1705,12 +1717,13 @@ setup_rings:
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1722,7 +1735,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
qg_buf->num_txqs = 1;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
goto err_cfg_txqs;
@@ -1742,7 +1755,7 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -1757,7 +1770,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
int ret;
int i;
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
if (ret)
return ret;
@@ -1997,17 +2010,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
* @rst_src: reset source
* @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings)
+ u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
{
u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
struct ice_txq_meta txq_meta = { };
int status;
@@ -2035,7 +2049,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -2044,7 +2058,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
*/
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4ee85a217c6f..0eb2307325d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2556,6 +2556,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+ struct netdev_bpf *xdp)
+{
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Please provide working DDP firmware package in order to use XDP\n"
+ "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_xdp - implements XDP handler
* @dev: netdevice
* @xdp: XDP command
@@ -6937,6 +6951,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp_safe_mode,
};
static const struct net_device_ops ice_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index e2b4b29ea207..04748aa4c7c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct ice_ring *xdp_ring;
- int err;
+ int err, result;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
return ICE_XDP_PASS;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- return ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
+ return result;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ return ICE_XDP_REDIR;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -2143,6 +2149,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
+ struct ethhdr *eth;
unsigned int count;
int tso, csum;
@@ -2189,7 +2196,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
goto out_drop;
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
vsi->type == ICE_VSI_PF &&
vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
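
Besides skb->priority, the transmit path above now also recognizes LLDP frames by EtherType, so they can egress the main VSI while firmware LLDP is disabled even when the sender did not set TC_PRIO_CONTROL. A hedged sketch of the EtherType test on a raw frame (ETH_P_LLDP is 0x88cc):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>  /* htons() */

    #define ETH_P_LLDP 0x88cc

    struct eth_hdr {            /* fields in network byte order */
            uint8_t  dst[6];
            uint8_t  src[6];
            uint16_t proto;
    };

    static bool frame_is_lldp(const void *frame)
    {
            const struct eth_hdr *eth = frame;

            return eth->proto == htons(ETH_P_LLDP);
    }
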
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a1d22d2aa0bd..97a46c616aca 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
- * in the case of VFR. If this is done for PFR, it can mess up VF
- * resets because the VF driver may already have started cleanup
- * by the time we get here.
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
*/
- if (!is_pfr)
+ if (!is_pfr) {
wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+ }
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -1698,7 +1700,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_vf_ctrl_vsi_release(vf);
ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi_with_release(vf);
+
+ if (ice_vf_rebuild_vsi_with_release(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+ return false;
+ }
+
ice_vf_post_vsi_rebuild(vf);
/* if the VF has been reset allow it to come up again */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index faa7b8d96adb..a1f89ea3c2bd 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
+ clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
return 0;
}
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return ICE_XDP_REDIR;
}
switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7bda8c5edea5..2d3daf022651 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb);
+ ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 038a9fd1af44..b2a042f825ff 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8280,7 +8280,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8300,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
- xdp->data += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* Determine available headroom for copy */
headlen = size;
@@ -8336,7 +8332,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8359,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- /* pull timestamp out of packet data */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
- __skb_pull(skb, IGB_TS_HDR_LEN);
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -8401,18 +8394,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
break;
case XDP_TX:
result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IGB_XDP_REDIR;
- else
- result = IGB_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IGB_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -8682,7 +8677,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer;
+ ktime_t timestamp = 0;
+ int pkt_offset = 0;
unsigned int size;
+ void *pktbuf;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8700,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
dma_rmb();
rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ pktbuf, &timestamp);
+
+ pkt_offset += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
/* retrieve a buffer from the ring */
if (!skb) {
- unsigned int offset = igb_rx_offset(rx_ring);
- unsigned char *hard_start;
+ unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+ unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
- hard_start = page_address(rx_buffer->page) +
- rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8740,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
} else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+ timestamp);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- &xdp, rx_desc);
+ &xdp, timestamp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ba61fe9bfaf4..d68cd4466a54 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
dev_kfree_skb_any(skb);
}
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
*
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8
*
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
**/
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb)
+ ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
- return IGB_RET_PTP_DISABLED;
+ return 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
/* check reserved dwords are zero, be/le doesn't matter for zero */
if (regval[0])
- return IGB_RET_PTP_INVALID;
+ return 0;
- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
- le64_to_cpu(regval[1]));
+ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
break;
}
}
- skb_hwtstamps(skb)->hwtstamp =
- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
- return 0;
+ *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+ return IGB_TS_HDR_LEN;
}
/**
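
Across the two igb hunks above, timestamp extraction moves ahead of XDP: igb_ptp_rx_pktstamp() now just parses the leading timestamp header and reports how many bytes it consumed, and igb_clean_rx_irq() peels those bytes off before building the xdp_buff, so XDP programs never see the metadata. A standalone sketch of the peel-then-adjust step (the 16-byte length matches IGB_TS_HDR_LEN; everything else is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define TS_HDR_LEN 16  /* matches IGB_TS_HDR_LEN */

    /* returns bytes consumed from the head of pkt, or 0 if no valid stamp */
    static size_t pull_rx_timestamp(const uint8_t *pkt, uint64_t *ts_out)
    {
            uint64_t reg0, reg1;

            memcpy(&reg0, pkt, 8);     /* reserved dwords: must be zero */
            memcpy(&reg1, pkt + 8, 8); /* raw little-endian counter     */
            if (reg0)
                    return 0;
            *ts_out = reg1;            /* driver converts and adjusts   */
            return TS_HDR_LEN;
    }

    static void rx_path(const uint8_t *buf, size_t size)
    {
            uint64_t ts = 0;
            size_t off = pull_rx_timestamp(buf, &ts);
            const uint8_t *data = buf + off;  /* XDP/skb start past metadata */

            size -= off;
            (void)data; (void)size; (void)ts;
    }
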
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 069471b7ffb0..f1adf154ec4a 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2047,20 +2047,19 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
break;
case XDP_TX:
if (igc_xdp_xmit_back(adapter, xdp) < 0)
- res = IGC_XDP_CONSUMED;
- else
- res = IGC_XDP_TX;
+ goto out_failure;
+ res = IGC_XDP_TX;
break;
case XDP_REDIRECT:
if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
- res = IGC_XDP_CONSUMED;
- else
- res = IGC_XDP_REDIRECT;
+ goto out_failure;
+ res = IGC_XDP_REDIRECT;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c5ec17d19c59..2ac5b82676f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IXGBE_XDP_REDIR;
- else
- result = IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IXGBE_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 988db46bff0e..214a38de3f41 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
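
The ixgbe_sriov.c change is a validate-before-use fix: the VF-supplied frame size is now range-checked at the top of ixgbe_set_vf_lpe(), before any of the per-VF enablement logic runs, instead of only checking the upper bound afterwards. The shape of the fix, as a sketch (the bounds follow ETH_MIN_MTU and IXGBE_MAX_JUMBO_FRAME_SIZE):

    #include <errno.h>

    #define MIN_FRAME 68    /* ETH_MIN_MTU */
    #define MAX_FRAME 9728  /* IXGBE_MAX_JUMBO_FRAME_SIZE */

    static int hw_program_max_frame(unsigned int v) { (void)v; return 0; } /* stub */

    /* max_frame arrives from an untrusted VF mailbox message */
    static int set_vf_lpe(unsigned int max_frame)
    {
            if (max_frame < MIN_FRAME || max_frame > MAX_FRAME)
                    return -EINVAL;  /* reject before any side effects */
            return hw_program_max_frame(max_frame);
    }
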
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 91ad5b902673..f72d2978263b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -106,9 +106,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return IXGBE_XDP_REDIR;
}
switch (act) {
@@ -116,16 +117,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ba2ed8a43d2d..0e733cc15c58 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
case XDP_TX:
xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ if (result == IXGBEVF_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6f987a7ffcb3..b30a45725374 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1315,23 +1315,23 @@ static int korina_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq_byname(pdev, "tx");
p = devm_platform_ioremap_resource_byname(pdev, "emac");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->eth_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->rx_dma_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->tx_dma_regs = p;
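
The korina fix works because devm_platform_ioremap_resource_byname() reports failure as an ERR_PTR-encoded pointer, never as NULL, so the old `!p` tests could never trigger and the real errno was lost. A sketch of the correct pattern, with userspace stand-ins for the kernel's ERR_PTR machinery:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* stand-ins for IS_ERR()/PTR_ERR() */
    static int is_err(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static long ptr_err(const void *p)
    {
            return (long)(intptr_t)p;
    }

    static int map_regs(void *p)
    {
            if (is_err(p)) {  /* never compare against NULL here */
                    fprintf(stderr, "cannot remap registers\n");
                    return (int)ptr_err(p);  /* propagate the real errno */
            }
            return 0;
    }
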
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41c2ad210bc9..21ef2f128070 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -154,6 +154,8 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+ dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
@@ -163,16 +165,18 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
- ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev,
- ch->dma.desc_base[ch->dma.desc].addr))) {
+ mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+ XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
goto skip;
}
+ ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ /* Make sure the address is written before we give it to HW */
+ wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
@@ -196,6 +200,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
+ net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
}
@@ -348,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
struct xrx200_chan *ch = ptr;
if (napi_schedule_prep(&ch->napi)) {
- __napi_schedule(&ch->napi);
ltq_dma_disable_irq(&ch->dma);
+ __napi_schedule(&ch->napi);
}
ltq_dma_ack_irq(&ch->dma);
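
Three ordering fixes land in lantiq_xrx200: on a DMA mapping failure the previous skb is put back so the ring slot never points at a freed buffer, a write barrier makes the buffer address visible before the descriptor is handed to hardware, and the DMA interrupt is disabled before NAPI is scheduled rather than after. A sketch of the publish-with-barrier step (wmb() stands in for the kernel barrier; the descriptor layout is illustrative):

    #include <stdint.h>

    struct desc {
            volatile uint32_t addr;  /* buffer address, read by HW */
            volatile uint32_t ctl;   /* ownership + length         */
    };

    #define OWN_HW (1u << 31)
    #define wmb()  __sync_synchronize()  /* stand-in for the kernel wmb() */

    static void publish_rx_buffer(struct desc *d, uint32_t dma_addr, uint32_t len)
    {
            d->addr = dma_addr;     /* 1: address first                */
            wmb();                  /* 2: order the two stores, so     */
            d->ctl = OWN_HW | len;  /* 3: HW never sees OWN without a
                                     *    valid address in place       */
    }
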
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 8edba5ea90f0..4a61c90003b5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
struct mvpp2_tai;
/* Definitions */
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
+struct mvpp2_buff_hdr {
+ __le32 next_phys_addr;
+ __le32 next_dma_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_phys_addr_high;
+ u8 next_dma_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ec706d614cac..d39c7639cdba 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3839,6 +3839,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
return ret;
}
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ phys_addr_t phys_addr, phys_addr_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+
+ phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22) {
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+ }
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ phys_addr = phys_addr_next;
+ dma_addr = dma_addr_next;
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
int rx_todo, struct mvpp2_rx_queue *rxq)
@@ -3885,14 +3914,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
MVPP2_RXD_BM_POOL_ID_OFFS;
bm_pool = &port->priv->bm_pools[pool];
- /* In case of an error, release the requested buffer pointer
- * to the Buffer Manager. This request process is controlled
- * by the hardware, and the information about the buffer is
- * comprised by the RX descriptor.
- */
- if (rx_status & MVPP2_RXD_ERR_SUMMARY)
- goto err_drop_frame;
-
if (port->priv->percpu_pools) {
pp = port->priv->page_pool[pool];
dma_dir = page_pool_get_dma_dir(pp);
@@ -3904,6 +3925,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes + MVPP2_MH_SIZE,
dma_dir);
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ /* In case of an error, release the requested buffer pointer
+ * to the Buffer Manager. This request process is controlled
+ * by the hardware, and the information about the buffer is
+ * comprised by the RX descriptor.
+ */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
/* Prefetch header */
prefetch(data);
@@ -3985,7 +4018,10 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+ else
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
rcu_read_unlock();
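
When the hardware splits an oversized frame across several pool buffers it chains them with an in-buffer header, and the mvpp2 error path now walks that chain so every buffer of a dropped multi-buffer frame is returned to the pool, not just the first. A standalone sketch of the chain walk (here the next pointer and last flag are captured before the buffer is released, which is the safe ordering; field names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct buff_hdr {
            struct buff_hdr *next;  /* next buffer in the chain */
            bool last;              /* set on the final buffer  */
    };

    static void pool_put(void *buf) { (void)buf; }  /* return one buffer */

    static void put_chain(struct buff_hdr *h)
    {
            struct buff_hdr *next;
            bool last;

            do {
                    next = h->next;  /* capture link and flag...                */
                    last = h->last;
                    pool_put(h);     /* ...before releasing the header's buffer */
                    h = next;
            } while (!last);
    }
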
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index f4962a97a075..9d9a2e438acf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -786,6 +786,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ed4eacef17ce..64adfd24e134 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -681,32 +681,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
void mtk_stats_update_mac(struct mtk_mac *mac)
{
struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = MTK_GDM1_TX_GBCNT;
- u64 stats;
-
- base += hw_stats->reg_offset;
+ struct mtk_eth *eth = mac->hw;
u64_stats_update_begin(&hw_stats->syncp);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+ hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+ } else {
+ unsigned int offs = hw_stats->reg_offset;
+ u64 stats;
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw,
+ MTK_GDM1_RX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ hw_stats->rx_overflow +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ hw_stats->rx_fcs_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ hw_stats->rx_short_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ hw_stats->rx_long_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ }
+
u64_stats_update_end(&hw_stats->syncp);
}
@@ -2423,7 +2444,8 @@ static void mtk_dim_rx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2452,7 +2474,8 @@ static void mtk_dim_tx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2480,6 +2503,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
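
Two things happen in mtk_eth_soc.c: MT7628/88 SoCs, whose MIB block exposes only a few 32-bit counters at SDM offsets, get their own branch in mtk_stats_update_mac(), and the QDMA delay-interrupt register is written only on SoCs that actually have QDMA. On the GDM path, 64-bit byte counters are assembled from a low/high register pair; a sketch of that read (the `if (hi)` guard mirrors the driver, which folds in the high word only when it is non-zero):

    #include <stdint.h>

    /* stand-in for mtk_r32(): one 32-bit MMIO register read */
    static uint32_t reg_read(uint32_t offs) { (void)offs; return 0; }

    static uint64_t read_byte_counter(uint32_t lo_offs, uint32_t hi_offs)
    {
            uint64_t bytes = reg_read(lo_offs);
            uint64_t hi = reg_read(hi_offs);

            if (hi)
                    bytes += hi << 32;  /* fold in the high word */
            return bytes;
    }
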
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 11331b44ba07..5ef70dd8b49c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -278,8 +278,21 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN 0x1B2C
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT 0x2400
+/* GMA1 counter / statistics register */
+#define MTK_GDM1_RX_GBCNT_L 0x2400
+#define MTK_GDM1_RX_GBCNT_H 0x2404
+#define MTK_GDM1_RX_GPCNT 0x2408
+#define MTK_GDM1_RX_OERCNT 0x2410
+#define MTK_GDM1_RX_FERCNT 0x2414
+#define MTK_GDM1_RX_SERCNT 0x2418
+#define MTK_GDM1_RX_LENCNT 0x241c
+#define MTK_GDM1_RX_CERCNT 0x2420
+#define MTK_GDM1_RX_FCCNT 0x2424
+#define MTK_GDM1_TX_SKIPCNT 0x2428
+#define MTK_GDM1_TX_COLCNT 0x242c
+#define MTK_GDM1_TX_GBCNT_L 0x2430
+#define MTK_GDM1_TX_GBCNT_H 0x2434
+#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
/* QDMA descriptor txd4 */
@@ -502,6 +515,13 @@
#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 1434df66fcf2..3616b77caa0a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
-#define MLX4_EEPROM_PAGE_LEN 256
-
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6cfec81ccc3..dc4ac1a2b6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (mlx4_is_mfunc(dev))
disable_unsupported_roce_caps(outbox);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+ dev_cap->map_clock_to_user = field & 0x80;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
dev_cap->reserved_qps = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 8f020f26ebf5..cf64e54eecb0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
+ bool map_clock_to_user;
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c326b434734e..00c84656b2e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
+ dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
if (mlx4_is_slave(dev))
return -EOPNOTSUPP;
+ if (!dev->caps.map_clock_to_user) {
+ mlx4_dbg(dev, "Map clock to user is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
if (!params)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ba6ac31a339d..256a06b3c096 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
return "Unknown Error";
}
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ int ret;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = 0;
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = I2C_ADDR_LOW;
+ cable_info->size = cpu_to_be16(1);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* Mad returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+ cable_info_mad_err_str(ret));
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ *module_id = cable_info->data[0];
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ *i2c_addr = I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < I2C_PAGE_SIZE)
+ return;
+
+ *i2c_addr = I2C_ADDR_HIGH;
+ *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ /* Offsets 0-255 belong to page 0.
+ * Offsets 256-639 belong to pages 01, 02, 03.
+ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+ */
+ if (*offset < I2C_PAGE_SIZE)
+ *page_num = 0;
+ else
+ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+ *i2c_addr = I2C_ADDR_LOW;
+ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
- u16 i2c_addr;
+ u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
+ ret = mlx4_get_module_id(dev, port, &module_id);
+ if (ret)
+ return ret;
+
+ switch (module_id) {
+ case MLX4_MODULE_ID_SFP:
+ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX4_MODULE_ID_QSFP:
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ case MLX4_MODULE_ID_QSFP28:
+ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+ return -EINVAL;
+ }
+
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
*/
size -= offset + size - I2C_PAGE_SIZE;
- i2c_addr = I2C_ADDR_LOW;
-
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
- cable_info->page_num = 0;
+ cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
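
The two helpers added to mlx4/port.c translate a flat EEPROM offset into the (i2c_addr, page, in-page offset) triple the MAD expects: SFP keeps its second 256 bytes behind I2C address 0x51, while QSFP stays at 0x50 and pages everything above byte 255 in 128-byte upper pages. A standalone rendition of the QSFP arithmetic with the worked example from the comment above:

    #include <stdint.h>
    #include <stdio.h>

    #define I2C_ADDR_LOW 0x50
    #define PAGE_SZ      256
    #define HIGH_PAGE_SZ 128

    static void qsfp_params(uint8_t *i2c, uint8_t *page, uint16_t *off)
    {
            *i2c = I2C_ADDR_LOW;  /* QSFP always reads via 0x50 */
            *page = *off < PAGE_SZ ? 0 : 1 + (*off - PAGE_SZ) / HIGH_PAGE_SZ;
            *off -= *page * HIGH_PAGE_SZ;
    }

    int main(void)
    {
            uint8_t i2c, page;
            uint16_t off = 400;

            qsfp_params(&i2c, &page, &off);
            /* offset 400 -> page 2, residual offset 400 - 2*128 = 144 */
            printf("i2c=%#x page=%u off=%u\n", i2c, page, off);
            return 0;
    }
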
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9166cd85013..ceebfc20f65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
mutex_lock(&mlx5_intf_mutex);
+ priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
}
} else {
adev = &priv->adev[i]->adev;
+
+ /* Pay attention that this is not the PCI driver to which
+ * mlx5_core_dev is bound, but an auxiliary driver.
+ *
+ * Here we can race module unload against devlink reload,
+ * but we don't need an extra lock because we are holding
+ * the global mlx5_intf_mutex.
+ */
+ if (!adev->dev.driver)
+ continue;
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
adev = &priv->adev[i]->adev;
+ /* Auxiliary driver was unbound manually through sysfs */
+ if (!adev->dev.driver)
+ goto skip_suspend;
+
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
}
+skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
struct mlx5_priv *priv = &dev->priv;
lockdep_assert_held(&mlx5_intf_mutex);
+ if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
+ return 0;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 0dd7615e5931..bc33eaada3b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
struct mlx5e_priv *priv = netdev_priv(dev);
struct devlink_port *port;
+ if (!netif_device_present(dev))
+ return NULL;
port = mlx5e_devlink_get_dl_port(priv);
if (port->registered)
return port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d907c1acd4d5..778e229310a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
-#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index ab935cce952b..c96668bd701c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -6,6 +6,7 @@
#include "en.h"
#include "en_stats.h"
+#include <linux/ptp_classify.h>
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
+static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+ struct flow_keys fk;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return false;
+
+ if (fk.basic.n_proto == htons(ETH_P_1588))
+ return true;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return false;
+
+ return (fk.basic.ip_proto == IPPROTO_UDP &&
+ fk.ports.dst == htons(PTP_EV_PORT));
+}
+
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 95f2b26a3ee3..9c076aa20306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
rpriv = priv->ppriv;
fwd_vport_num = rpriv->rep->vport;
lag_dev = netdev_master_upper_dev_get(netdev);
+ if (!lag_dev)
+ return;
netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index be0ee03de721..2e9bee4e5209 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
work);
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
struct neighbour *n = update_work->n;
+ struct mlx5e_encap_entry *e = NULL;
bool neigh_connected, same_dev;
- struct mlx5e_encap_entry *e;
unsigned char ha[ETH_ALEN];
- struct mlx5e_priv *priv;
u8 nud_state, dead;
rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
if (!same_dev)
goto out;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- if (!mlx5e_encap_take(e))
- continue;
+ /* mlx5e_get_next_init_encap() releases previous encap before returning
+ * the next one.
+ */
+ while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
+ mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
- priv = netdev_priv(e->out_dev);
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
- mlx5e_encap_put(priv, e);
- }
out:
rtnl_unlock();
mlx5e_release_neigh_update_work(update_work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 6cdc52d50a48..85eaadc989df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
ASSERT_RTNL();
- /* wait for encap to be fully initialized */
- wait_for_completion(&e->res_ready);
-
mutex_lock(&esw->offloads.encap_tbl_lock);
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
+ if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list);
@@ -626,7 +622,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5_eswitch *esw;
u32 zone_restore_id;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (!tc_skb_ext) {
WARN_ON(1);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 593503bc4d07..490131e06efb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -251,9 +251,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
mlx5e_take_tmp_flow(flow, flow_list, 0);
}
+typedef bool (match_cb)(struct mlx5e_encap_entry *);
+
static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
- struct mlx5e_encap_entry *e)
+mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e,
+ match_cb match)
{
struct mlx5e_encap_entry *next = NULL;
@@ -288,7 +291,7 @@ retry:
/* wait for encap to be fully initialized */
wait_for_completion(&next->res_ready);
/* continue searching if encap entry is not in valid state after completion */
- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ if (!match(next)) {
e = next;
goto retry;
}
@@ -296,6 +299,30 @@ retry:
return next;
}
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
+{
+ return e->flags & MLX5_ENCAP_ENTRY_VALID;
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
+}
+
+static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
+{
+ return e->compl_result >= 0;
+}
+
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
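
The refactor above folds two nearly identical list walks into one mlx5e_get_next_matching_encap(), parameterized by a match_cb predicate, with "valid" and "initialized" expressed as one-line callbacks. The shape of that pattern, as a self-contained sketch over a plain linked list (the reference taking and res_ready wait of the real iterator are elided):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct node { int val; struct node *next; };

	typedef bool (match_cb)(const struct node *);

	/* Return the first node after pos (or the head when pos is NULL)
	 * accepted by match; NULL once the list is exhausted. */
	static struct node *next_matching(struct node *head, struct node *pos,
					  match_cb *match)
	{
		for (struct node *n = pos ? pos->next : head; n; n = n->next)
			if (match(n))
				return n;
		return NULL;
	}

	static bool is_even(const struct node *n) { return (n->val & 1) == 0; }

	int main(void)
	{
		struct node c = { 4, NULL }, b = { 3, &c }, a = { 2, &b };
		struct node *p = NULL;
		int sum = 0;

		while ((p = next_matching(&a, p, is_even)) != NULL)
			sum += p->val; /* visits 2 and 4 */
		assert(sum == 6);
		return 0;
	}
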
@@ -1505,7 +1532,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3d45341e2216..26f7fab109d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!priv->ipsec)
- return;
-
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 5cd466ec6492..25403af32859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -356,7 +356,7 @@ err:
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
- int err = 0;
+ int err = -ENOMEM;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8360289813f0..d6513aef5cd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ unsigned long fec_bitmap;
u16 fec_policy = 0;
int mode;
int err;
- if (bitmap_weight((unsigned long *)&fecparam->fec,
- ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
return -EOPNOTSUPP;
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
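
The old check cast a u32 field to unsigned long * and handed it to bitmap_weight(), which on 64-bit reads 8 bytes from a 4-byte object; bitmap_from_arr32() instead copies the word into a correctly sized bitmap first. A little-endian userspace sketch of the idea:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	/* Copy one u32 into an unsigned long bitmap. Casting &val to
	 * (unsigned long *) instead would read past the 4-byte object on
	 * 64-bit targets -- the overread removed above. (This sketch
	 * assumes little endian; the kernel helper handles endianness.) */
	static void u32_to_bitmap(unsigned long *bitmap, uint32_t val)
	{
		*bitmap = 0;
		memcpy(bitmap, &val, sizeof(val));
	}

	int main(void)
	{
		unsigned long bm;

		u32_to_bitmap(&bm, 0x5);              /* bits 0 and 2 */
		assert(__builtin_popcountl(bm) == 2); /* bitmap_weight() analogue */
		return 0;
	}
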
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
+ if (new_val && !priv->profile->rx_ptp_support &&
+ priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ netdev_err(priv->netdev,
+ "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0d571a0c76d9..0b75fab41ae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bca832cdc4cb..d26b8ed51195 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -889,10 +889,13 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq)
+ if (rq->icosq) {
mlx5e_trigger_irq(rq->icosq);
- else
+ } else {
+ local_bh_disable();
napi_schedule(rq->cq.napi);
+ local_bh_enable();
+ }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2697,13 +2700,11 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
int err;
old_num_txqs = netdev->real_num_tx_queues;
- old_ntc = netdev->num_tc;
+ old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
- if (priv->channels.params.ptp_rx)
- num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -3855,6 +3856,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
+ if (mlx5e_is_uplink_rep(priv)) {
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+ }
+
mutex_unlock(&priv->state_lock);
return features;
@@ -3971,11 +3982,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
return mlx5e_ptp_rx_manage_fs(priv, set);
}
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+ bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+ int err;
+
+ if (!rx_filter)
+ /* Reset CQE compression to Admin default */
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+ if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ return 0;
+
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err)
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+ return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
{
struct mlx5e_params new_params;
+
+ if (ptp_rx == priv->channels.params.ptp_rx)
+ return 0;
+
+ new_params = priv->channels.params;
+ new_params.ptp_rx = ptp_rx;
+ return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
struct hwtstamp_config config;
bool rx_cqe_compress_def;
+ bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3995,13 +4040,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
- new_params = priv->channels.params;
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- new_params.ptp_rx = false;
+ ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -4018,24 +4062,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- new_params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
+ /* ptp_rx is enabled only when HW timestamping is requested and
+ * CQE compression is enabled
+ */
+ ptp_rx = rx_cqe_compress_def;
break;
default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
+ err = -ERANGE;
+ goto err_unlock;
}
- if (new_params.ptp_rx == priv->channels.params.ptp_rx)
- goto out;
+ if (!priv->profile->rx_ptp_support)
+ err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+ config.rx_filter != HWTSTAMP_FILTER_NONE);
+ else
+ err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+ if (err)
+ goto err_unlock;
- err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
- &new_params.ptp_rx, true);
- if (err) {
- mutex_unlock(&priv->state_lock);
- return err;
- }
-out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
@@ -4044,6 +4089,9 @@ out:
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4774,22 +4822,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
- netdev->hw_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -5229,6 +5270,11 @@ static void mlx5e_update_features(struct net_device *netdev)
rtnl_unlock();
}
+static void mlx5e_reset_channels(struct net_device *netdev)
+{
+ netdev_reset_tc(netdev);
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
@@ -5283,6 +5329,7 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
@@ -5300,6 +5347,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
+ mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 47a9c49b25fd..d4b0f270b6bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1322,10 +1322,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool vf_tun = false, encap_valid = true;
+ struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
@@ -1371,16 +1371,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = __dev_get_by_index(dev_net(priv->netdev),
- mirred_ifindex);
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto err_out;
+ }
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
+ dev_put(out_dev);
if (err)
goto err_out;
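
__dev_get_by_index() returns an unreferenced pointer and is only valid under RTNL; dev_get_by_index() is safe without it but hands back a reference that must be dropped with dev_put() on every path, which is why the hunk adds both the NULL check and the dev_put() after use. A toy sketch of that get/put discipline, with hypothetical names:

	#include <assert.h>
	#include <stddef.h>

	struct dev { int refcnt; };

	static struct dev the_dev = { .refcnt = 1 };

	static struct dev *dev_lookup_get(int ifindex) /* like dev_get_by_index() */
	{
		if (ifindex != 1)
			return NULL;
		the_dev.refcnt++; /* reference handed to the caller */
		return &the_dev;
	}

	static void dev_put_ref(struct dev *d) /* like dev_put() */
	{
		d->refcnt--;
	}

	static int use_device(int ifindex)
	{
		struct dev *d = dev_lookup_get(ifindex);

		if (!d)
			return -1; /* -ENODEV above */
		/* ... attach encap state ... */
		dev_put_ref(d); /* drop the reference on every exit path */
		return 0;
	}

	int main(void)
	{
		assert(use_device(1) == 0 && the_dev.refcnt == 1);
		assert(use_device(7) == -1);
		return 0;
	}
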
@@ -1393,6 +1399,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
+ if (vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
@@ -2003,11 +2015,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ enum fs_flow_table_type fs_type;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
+ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
match_level = outer_match_level;
if (dissector->used_keys &
@@ -2133,6 +2147,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+ fs_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on CVLAN is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
@@ -3526,8 +3547,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
if (err)
return err;
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
- dev_get_iflink(vlan_dev));
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, action);
@@ -4740,7 +4765,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
- hpe->hp->pair->peer_gone = true;
+ mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
mlx5e_hairpin_put(priv, hpe);
}
@@ -5074,7 +5099,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
chain = mapped_obj.chain;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (WARN_ON(!tc_skb_ext))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 25c091795bcd..17027536efba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8ba62671f5f1..320fe0cda917 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,7 +32,6 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
-#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif
-static bool mlx5e_use_ptpsq(struct sk_buff *skb)
-{
- struct flow_keys fk;
-
- if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
- return false;
-
- if (fk.basic.n_proto == htons(ETH_P_1588))
- return true;
-
- if (fk.basic.n_proto != htons(ETH_P_IP) &&
- fk.basic.n_proto != htons(ETH_P_IPV6))
- return false;
-
- return (fk.basic.ip_proto == IPPROTO_UDP &&
- fk.ports.dst == htons(PTP_EV_PORT));
-}
-
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
}
ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel) &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb))
+ if (unlikely(ptp_channel &&
+ test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+ mlx5e_use_ptpsq(skb)))
return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 77c0ca655975..940333410267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eqe = next_eqe_sw(eq);
if (!eqe)
- return 0;
+ goto out;
do {
struct mlx5_core_cq *cq;
@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+
+out:
eq_update_ci(eq, 1);
if (cqn != -1)
@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
- eq_update_ci(eq, 1);
out:
+ eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0;
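
Both hunks make the consumer-index write-back (and hence EQ re-arming) unconditional: the completion path previously returned before eq_update_ci() when no EQE was pending, and the async path skipped it on the recovery bail-out; either can leave the queue unarmed so no further interrupts fire. The invariant, as a minimal ring-buffer sketch:

	#include <assert.h>
	#include <stdint.h>

	struct eq {
		uint32_t cons, prod;
		uint32_t doorbell; /* what the device sees; also re-arms the EQ */
	};

	static int eq_poll(struct eq *eq, int budget)
	{
		int n = 0;

		while (n < budget && eq->cons != eq->prod) {
			/* ... handle the entry at eq->cons ... */
			eq->cons++;
			n++;
		}
		/* Always publish the consumer index, even when n == 0;
		 * returning before this point is the bug fixed above. */
		eq->doorbell = eq->cons;
		return n;
	}

	int main(void)
	{
		struct eq eq = { .cons = 5, .prod = 5, .doorbell = 0 };

		assert(eq_poll(&eq, 8) == 0 && eq.doorbell == 5); /* re-armed anyway */
		return 0;
	}
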
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 570f2280823c..97e6cb6f13c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
@@ -1053,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
goto err_vhca_mapping;
}
+ /* External controller host PF has a factory-programmed MAC.
+ * Read it from the device.
+ */
+ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index db1e74280e57..d18a28a6e9a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
struct mlx5_fs_chains *chains,
int i)
{
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index a81ece94f599..b45954905845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int err;
+ int err, err2;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action then the termination table is the
* same prio as the slow path
*/
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.prio = FDB_TC_OFFLOAD;
ft_attr.max_fte = 1;
+ ft_attr.level = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table (error %d)\n",
- IS_ERR(tt->termtbl));
- return -EOPNOTSUPP;
+ err = PTR_ERR(tt->termtbl);
+ esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
+ return err;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule (error %d)\n",
- IS_ERR(tt->rule));
+ err = PTR_ERR(tt->rule);
+ esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
goto add_flow_err;
}
return 0;
add_flow_err:
- err = mlx5_destroy_flow_table(tt->termtbl);
- if (err)
- esw_warn(dev, "Failed to destroy termination table\n");
+ err2 = mlx5_destroy_flow_table(tt->termtbl);
+ if (err2)
+ esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
- return -EOPNOTSUPP;
+ return err;
}
static struct mlx5_termtbl_handle *
@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
-{
- switch (rt->reformat_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
- return true;
- default:
- return false;
- }
-}
-
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
}
-
- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->pkt_reformat = src->pkt_reformat;
- src->pkt_reformat = NULL;
- }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+ } else {
+ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = NULL;
+ }
+
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
- IS_ERR(tt));
+ esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
goto revert_changes;
/* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
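
Two error-handling fixes ride along in this file: the failure prints used IS_ERR(ptr), which is just 0 or 1, as the "error code", and create failures were flattened to -EOPNOTSUPP instead of propagating PTR_ERR(); %pe now decodes the error pointer symbolically. The kernel's error-pointer convention, sketched in userspace:

	#include <assert.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long err)      { return (void *)err; }
	static long  PTR_ERR(const void *p) { return (long)p; }
	static int   IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *tbl = ERR_PTR(-95); /* -EOPNOTSUPP */

		if (IS_ERR(tbl)) {
			/* The old messages printed IS_ERR(tbl), i.e. 1. */
			printf("create failed, err %ld\n", PTR_ERR(tbl));
			assert(PTR_ERR(tbl) == -95);
		}
		return 0;
	}
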
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index d5d57630015f..106b50e42b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
+ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ return;
+
mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 2c41a6920264..fd6196b5e163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
struct lag_mp *mp = &ldev->lag_mp;
int err;
+ /* always clear mfi, as it might become stale when a route delete event
+ * has been missed
+ */
+ mp->mfi = NULL;
+
if (mp->fib_nb.notifier_call)
return 0;
@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ mp->mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 00ef10a1a9f8..20a4047f2737 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -107,7 +107,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index e96f345e7dae..d50bdb226cef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
bool
mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
bool
mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
static inline struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) { return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index fd8449ff9e17..839a01da110f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/mpfs.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -175,6 +176,7 @@ out:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_add_mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
{
@@ -206,3 +208,4 @@ unlock:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_del_mac);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a7b2c3203a7..4a293542a7aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -84,12 +84,9 @@ struct l2addr_node {
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
-int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
-static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
-static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c114365eb126..0d0f63a27aba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- struct mlx5_profile *prof = dev->profile;
+ struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int err;
@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- profile[prof_sel].log_max_qp,
+ prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
}
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -1161,7 +1161,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_core_set_hca_defaults(dev);
if (err) {
mlx5_core_err(dev, "Failed to set hca defaults\n");
- goto err_sriov;
+ goto err_set_hca;
}
mlx5_vhca_event_start(dev);
@@ -1194,6 +1194,7 @@ err_ec:
mlx5_sf_hw_table_destroy(dev);
err_vhca:
mlx5_vhca_event_stop(dev);
+err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1381,8 +1382,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
struct mlx5_priv *priv = &dev->priv;
int err;
- dev->profile = &profile[profile_idx];
-
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->intf_state_mutex);
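
dev->profile used to point into the shared global profile[] array, so clamping log_max_qp for one device silently rewrote the template used by every later probe; embedding a per-device copy makes the clamp local. The difference, sketched:

	#include <assert.h>
	#include <string.h>

	struct profile { int log_max_qp; };

	static const struct profile profiles[] = { { .log_max_qp = 18 } };

	struct device { struct profile profile; }; /* embedded copy, not a pointer */

	static void device_init(struct device *d, int idx, int hca_limit)
	{
		memcpy(&d->profile, &profiles[idx], sizeof(d->profile));
		if (d->profile.log_max_qp > hca_limit)
			d->profile.log_max_qp = hca_limit; /* clamp stays per device */
	}

	int main(void)
	{
		struct device a, b;

		device_init(&a, 0, 17); /* this HCA caps log_max_qp at 17 */
		device_init(&b, 0, 20); /* the next probe still sees 18 */
		assert(a.profile.log_max_qp == 17 && b.profile.log_max_qp == 18);
		return 0;
	}
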
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 50af84e76fb6..174f71ed5280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key |= mlx5_idx_to_mkey(mkey_index);
+ mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
init_waitqueue_head(&mkey->wait);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 1f907df5b3a2..c3373fb1cd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -95,9 +95,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
- int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
- void *hca_cap, *cap;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -116,11 +117,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- hca_cap = kzalloc(sz, GFP_KERNEL);
- if (!hca_cap)
- return -ENOMEM;
+ query_cap = kzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ if (ret)
+ goto out;
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
@@ -130,7 +140,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+out:
kfree(hca_cap);
+ kfree(query_cap);
return ret;
}
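
SET_HCA_CAP writes the entire capability page, so setting just dynamic_msix_table_size on a zeroed buffer would wipe every other capability of that function; the fix queries the current page and copies it into the set buffer first, a classic read-modify-write. Sketched with a toy capability page (field offsets hypothetical):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define CAP_WORDS 16

	static uint32_t device_caps[CAP_WORDS] = { [0] = 0xdeadbeef, [3] = 8 };

	static void query_caps(uint32_t *out) { memcpy(out, device_caps, sizeof(device_caps)); }
	static void set_caps(const uint32_t *in) { memcpy(device_caps, in, sizeof(device_caps)); }

	static void set_msix_count(uint32_t count)
	{
		uint32_t caps[CAP_WORDS];

		query_caps(caps); /* read: start from the device's current page */
		caps[3] = count;  /* modify: hypothetical msix field */
		set_caps(caps);   /* write: everything else survives */
	}

	int main(void)
	{
		set_msix_count(32);
		assert(device_caps[3] == 32 && device_caps[0] == 0xdeadbeef);
		return 0;
	}
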
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 441b5453acae..540cf05f6373 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
+ if (!MLX5_CAP_GEN(dev, roce))
+ return;
+
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 6a0c6f965ad1..fa0288afc0dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a8e73c9ed1ea..1be048769309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
switch (hw_state) {
case MLX5_VHCA_STATE_ACTIVE:
case MLX5_VHCA_STATE_IN_USE:
- case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
return DEVLINK_PORT_FN_STATE_ACTIVE;
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
default:
return DEVLINK_PORT_FN_STATE_INACTIVE;
}
@@ -192,14 +192,17 @@ sf_err:
return err;
}
-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+ struct netlink_ext_ack *extack)
{
int err;
if (mlx5_sf_is_active(sf))
return 0;
- if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
- return -EINVAL;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+ NL_SET_ERR_MSG_MOD(extack, "SF is deactivated but it is still attached");
+ return -EBUSY;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf,
- enum devlink_port_fn_state state)
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
if (state == mlx5_sf_to_devlink_state(sf->hw_state))
goto out;
if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
- err = mlx5_sf_activate(dev, sf);
+ err = mlx5_sf_activate(dev, sf, extack);
else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
err = mlx5_sf_deactivate(dev, sf);
else
@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
goto out;
}
- err = mlx5_sf_state_set(dev, table, sf, state);
+ err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 1fbcd012bb85..7ccfd40586ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int ret;
ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
- ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+ MLX5_FT_MAX_MULTIPATH_LEVEL);
ft_attr.reformat_en = reformat_req;
ft_attr.decap_en = reformat_req;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 054c2e2b6554..7466f016375c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -694,7 +694,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
return -EINVAL;
- memcpy(padded_data, data, data_sz);
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+ /* Add alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
/* Remove L2L3 outer headers */
MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -706,32 +710,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
hw_action += DR_STE_ACTION_DOUBLE_SZ;
used_actions++; /* Remove and NOP are a single double action */
- inline_data_sz =
- MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
- /* Add the new header inline + 2 extra bytes */
+ /* Add the new header using the inline action, 4 bytes at a time.
+ * The header is inserted in reverse order at the beginning of the
+ * packet to avoid incorrect parsing by the HW. Since the header is
+ * 14B or 18B, an extra two bytes are padded and later removed.
+ */
for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
void *addr_inline;
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_INLINE);
/* The hardware expects here offset to words (2 bytes) */
- MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
- i * 2);
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
/* Copy bytes one by one to avoid endianness problem */
addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
hw_action, inline_data);
- memcpy(addr_inline, data_ptr, inline_data_sz);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
hw_action += DR_STE_ACTION_DOUBLE_SZ;
- data_ptr += inline_data_sz;
used_actions++;
}
- /* Remove 2 extra bytes */
+ /* Remove first 2 extra bytes */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
- MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
/* The hardware expects here size in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
used_actions++;
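
Worked through with inline_data_sz = 4: an 18-byte (VLAN-tagged) L2 header is first copied at offset 18 % 4 = 2 into the zeroed padded buffer, the loop then runs 18/4 + 1 = 5 times, and because every insert lands at packet offset 0 the chunks are written last-first, so the packet ends up as 2 pad bytes followed by the header in its original order; the final action strips those 2 bytes from offset 0. A standalone check of that arithmetic:

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		const int inline_sz = 4; /* bytes per inline insert action */
		const int data_sz = 18;  /* VLAN-tagged L2 header */
		char hdr[18] = "ABCDEFGHIJKLMNOPQR";
		char padded[20] = { 0 };
		char packet[20];

		memcpy(padded + data_sz % inline_sz, hdr, data_sz); /* 2 pad bytes */

		/* Each chunk is inserted at offset 0, starting from the last
		 * chunk of padded[], i.e. in reverse order. */
		for (int i = 0; i < data_sz / inline_sz + 1; i++) {
			int chunk = (data_sz / inline_sz - i) * inline_sz;

			memmove(packet + inline_sz, packet, i * inline_sz);
			memcpy(packet, padded + chunk, inline_sz);
		}
		/* Result: 2 pad bytes, then the header in original order. */
		assert(memcmp(packet + 2, hdr, data_sz) == 0);
		return 0;
	}
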
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 612b0ac31db2..9737565cd8d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
- (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
- (MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX));
+ return MLX5_CAP_GEN(dev, roce) &&
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+ (MLX5_CAP_GEN(dev, steering_format_version) <=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 01cc00ad8acf..b6931bbe52d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -424,6 +424,15 @@ err_modify_sq:
return err;
}
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
-
/* unset peer SQs */
- if (hp->peer_gone)
- return;
- for (i = 0; i < hp->num_channels; i++)
- mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_RST, 0, 0);
+ if (!hp->peer_gone)
+ mlx5_hairpin_unpair_peer_sq(hp);
}
struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
mlx5_hairpin_destroy_queues(hp);
kfree(hp);
}
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ mlx5_hairpin_unpair_peer_sq(hp);
+
+ /* destroy peer SQ */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+ hp->peer_gone = true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 457ad42eaa2a..4c1440a95ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
void *in;
int err;
- if (!vport)
- return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index dfea14399607..85f0ce285146 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -693,7 +693,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0, 0);
+ NULL, 0,
+ module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
return err;
@@ -815,7 +816,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0, 0);
+ NULL, 0,
+ gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 900b4bf5bb5b..2bc5a9003c6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 04672eb5c7f3..9958d503bf0e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
+ u32 parent;
if (band < mlxsw_sp_qdisc->num_classes &&
mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
@@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
- mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+ parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+ parent);
if (!WARN_ON(!mlxsw_sp_qdisc))
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
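
find_class() keys on a TC class handle, not a raw band index, so the caller now builds the parent handle with TC_H_MAKE(qdisc_handle, band + 1): band 0 is class :1, band 1 is class :2, and so on. TC_H_MAKE just splices the 16-bit major and minor halves:

	#include <assert.h>
	#include <stdint.h>

	/* Same definition as include/uapi/linux/pkt_sched.h. */
	#define TC_H_MAKE(maj, min) (((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))

	int main(void)
	{
		uint32_t qdisc_handle = 0x00010000; /* handle 1: */

		assert(TC_H_MAKE(qdisc_handle, 0 + 1) == 0x00010001); /* band 0 -> 1:1 */
		assert(TC_H_MAKE(qdisc_handle, 7 + 1) == 0x00010008); /* band 7 -> 1:8 */
		return 0;
	}
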
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 3658c4ae3c37..ee921a99e439 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* Microchip ENCX24J600 ethernet driver
*
* Copyright (C) 2015 Gridpoint
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index f604a260ede7..fac61a8fbd02 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* encx24j600_hw.h: Register definitions
*
*/
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0c4283319d7f..adfb9781799e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
int ocelot_port_flush(struct ocelot *ocelot, int port)
{
+ unsigned int pause_ena;
int err, val;
/* Disable dequeuing from the egress queues */
@@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
QSYS_PORT_MODE, port);
/* Disable flow control */
+ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
/* Disable priority flow control */
@@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+ /* Re-enable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
return err;
}
EXPORT_SYMBOL(ocelot_port_flush);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c84c8bf2bc20..fc99ad8e4a38 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
+ status = -EINVAL;
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 5f8b0bb3af6e..202973a82712 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
+ depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
select NET_DEVLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7e6bac85495d..344ea1143454 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1602,6 +1602,8 @@ err_out_free_netdev:
free_netdev(netdev);
err_out_free_res:
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 17d5b649eb36..e81dd34a3cac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct qed_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 214e347097a7..2376b2729633 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
value = readl(&port_regs->CommonRegs.semaphoreReg);
if ((value & (sem_mask >> 16)) == sem_bits)
return 0;
- ssleep(1);
+ mdelay(1000);
} while (--seconds);
return -1;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 96b947fde646..3beafc60747e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
kfree(ahw);
err_out_free_res:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 41fbd2ceeede..ab1e0fcccabb 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
- struct rmnet_vnd_stats total_stats;
+ struct rmnet_vnd_stats total_stats = { };
struct rmnet_pcpu_stats *pcpu_ptr;
+ struct rmnet_vnd_stats snapshot;
unsigned int cpu, start;
- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
- total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
- total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
- total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
- total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+ snapshot = pcpu_ptr->stats; /* struct assignment */
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
- total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+ total_stats.rx_pkts += snapshot.rx_pkts;
+ total_stats.rx_bytes += snapshot.rx_bytes;
+ total_stats.tx_pkts += snapshot.tx_pkts;
+ total_stats.tx_bytes += snapshot.tx_bytes;
+ total_stats.tx_drops += snapshot.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
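
u64_stats_fetch_retry_irq() can spin the block multiple times on 32-bit while a writer is active, so anything read inside must be re-read each pass and nothing may be accumulated until the read is known consistent: summing straight into total_stats, as before, double-counts on retry (and tx_drops was read outside the protected block entirely). A simplified seqcount-style sketch of the discipline, ignoring the kernel's memory-barrier details:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdint.h>

	struct stats { uint64_t pkts, bytes; };
	struct pcpu  { atomic_uint seq; struct stats s; };

	static struct stats read_snapshot(struct pcpu *p)
	{
		struct stats snap;
		unsigned int start;

		do {
			start = atomic_load(&p->seq); /* even: no writer active */
			snap = p->s;                  /* may tear; checked below */
		} while ((start & 1) || atomic_load(&p->seq) != start);
		return snap; /* only now is it safe to add into a running total */
	}

	int main(void)
	{
		struct pcpu p = { .s = { 3, 1500 } };
		struct stats t = read_snapshot(&p);

		assert(t.pkts == 3 && t.bytes == 1500);
		return 0;
	}
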
@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 2c89cde7da1e..2ee72dc431cd 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1671,7 +1671,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
break;
}
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c5b154868c1f..713d3629b4c1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *sh_eth_gstrings_stats,
+ memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
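
Both this hunk and the r8169 one above fix the same pattern: for a 2-D array char g[][ETH_GSTRING_LEN], *g decays to the first 32-byte row, so memcpy(dst, *g, sizeof(g)) copies the whole array out of a one-row source object; the bytes are the same at runtime, but the overread trips fortified memcpy(). Demonstrated:

	#include <stdio.h>
	#include <string.h>

	#define ETH_GSTRING_LEN 32

	static const char gstrings[][ETH_GSTRING_LEN] = {
		"tx_packets", "rx_packets", "tx_errors",
	};

	int main(void)
	{
		char data[sizeof(gstrings)];

		/* gstrings and *gstrings hold the same address, but *gstrings
		 * names only row 0; sizing the copy by the whole array is the
		 * overread the patches remove. */
		memcpy(data, gstrings, sizeof(gstrings));
		printf("%s %s\n", data, data + ETH_GSTRING_LEN);
		return 0;
	}
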
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index d1e908846f5d..22fbb0ae77fb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx->pci_dev->irq);
goto fail1;
}
+ efx->irqs_hooked = true;
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b70d44ac0990..3c73453725f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -76,10 +76,10 @@ enum power_event {
#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
/* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
- (reg * 8))
-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
- (reg * 8))
+#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+ 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+ 0x00000044 + (reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
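
The extended MAC address registers (slots 16 and up) start at 0x800 for slot 16, but the old macro kept multiplying by the absolute slot number: for reg = 16 it produced 0x800 + 16*8 = 0x880 instead of 0x800. The fixed form rebases the extended bank; checked standalone:

	#include <assert.h>

	#define ADDR_HIGH_OLD(reg) ((((reg) > 15) ? 0x800 : 0x040) + ((reg) * 8))
	#define ADDR_HIGH_NEW(reg) (((reg) > 15) ? 0x800 + ((reg) - 16) * 8 \
						 : 0x040 + ((reg) * 8))

	int main(void)
	{
		assert(ADDR_HIGH_NEW(0)  == 0x040);
		assert(ADDR_HIGH_NEW(15) == 0x040 + 15 * 8);
		assert(ADDR_HIGH_NEW(16) == 0x800); /* old macro: 0x880 */
		assert(ADDR_HIGH_OLD(16) == 0x880);
		return 0;
	}
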
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 345b4c6d1fd4..c87202cbd3d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1196,7 +1196,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
*/
static int stmmac_init_phy(struct net_device *dev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct stmmac_priv *priv = netdev_priv(dev);
struct device_node *node;
int ret;
@@ -1222,8 +1221,12 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_connect_phy(priv->phylink, phydev);
}
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
return ret;
}
@@ -1237,8 +1240,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.pcs_poll = true;
- priv->phylink_config.ovr_an_inband =
- priv->plat->mdio_bus_data->xpcs_an_inband;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ priv->plat->mdio_bus_data->xpcs_an_inband;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -5888,12 +5892,21 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
ret = eth_mac_addr(ndev, addr);
if (ret)
- return ret;
+ goto set_mac_error;
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+set_mac_error:
+ pm_runtime_put(priv->device);
+
return ret;
}
@@ -6188,12 +6201,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6219,6 +6226,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
bool is_double = false;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
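
The two VLAN hunks put the runtime-PM reference where the hardware filter is actually touched: stmmac_vlan_rx_kill_vid() writes registers just as the add path does, so each ndo must hold its own reference around the access. A stubbed sketch of the pattern (error-path discipline as in the set_mac_address sketch above):

#include <stdio.h>

static int usage;

static void hw_update_vlan(unsigned int vid, int add)
{
	/* register write: only valid while usage > 0 (device resumed) */
	printf("vid %u %s, usage=%d\n", vid, add ? "add" : "kill", usage);
}

static void vlan_op(unsigned int vid, int add)
{
	usage++;		/* pm_runtime_get_sync() */
	hw_update_vlan(vid, add);
	usage--;		/* pm_runtime_put() */
}

int main(void)
{
	vlan_op(100, 1);
	vlan_op(100, 0);
	return 0;
}
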
@@ -7036,7 +7049,6 @@ error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
- stmmac_bus_clks_config(priv, false);
bitmap_free(priv->af_xdp_zc_qps);
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1e17a23d9118..a696ada013eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -622,6 +622,8 @@ error_pclk_get:
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
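
Together with the removal of stmmac_bus_clks_config() from the probe error path, the stmmac_remove_config_dt() hunk gives the bus clocks a single owner: the teardown that mirrors the DT-config setup, shared by error unwind and normal remove. (clk_disable_unprepare() treats a NULL clock as a no-op, so this is safe even when a clock was never obtained.) A stubbed sketch of the balance:

#include <stdio.h>

static int clk_enabled;

static void clk_prepare_enable_stub(void)	{ clk_enabled++; }
static void clk_disable_unprepare_stub(void)	{ clk_enabled--; }

static void probe_config_dt(void)
{
	clk_prepare_enable_stub();	/* stmmac_clk */
	clk_prepare_enable_stub();	/* pclk */
}

static void remove_config_dt(void)
{
	clk_disable_unprepare_stub();
	clk_disable_unprepare_stub();
}

int main(void)
{
	probe_config_dt();
	/* ... a later probe step fails ... */
	remove_config_dt();		/* single teardown path */
	printf("clk balance = %d\n", clk_enabled);	/* 0 */
	return 0;
}
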
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9030e619e543..97942b0e3897 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
KNAV_QUEUE_SHARED);
if (IS_ERR(tx_pipe->dma_queue)) {
- dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
- name, ret);
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+ name, tx_pipe->dma_queue);
ret = PTR_ERR(tx_pipe->dma_queue);
goto err;
}
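
At the point of the old dev_err(), ret had not yet been assigned from PTR_ERR(), so the message logged a stale value; %pe instead prints the symbolic errno encoded in the ERR_PTR itself. A userspace sketch of the ERR_PTR convention (the kernel-only %pe is approximated with a numeric print):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *queue_open(void)
{
	return ERR_PTR(-ENODEV);	/* error travels inside the pointer */
}

int main(void)
{
	void *q = queue_open();

	if (IS_ERR(q)) {
		/* the kernel's %pe would print "-ENODEV" here */
		fprintf(stderr, "open failed: %ld\n", PTR_ERR(q));
		return 1;
	}
	return 0;
}
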
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a1f5f07f4ca9..9a13953ea70f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
+ /* Make sure that the other fields are read after bd is
+ * released by dma
+ */
+ rmb();
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
skb = (struct sk_buff *)ptr_from_txbd(cur_p);
if (skb)
dev_consume_skb_irq(skb);
- cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
+ /* app0 must be visible last, as it is used to flag
+ * availability of the bd
+ */
+ smp_mb();
+ cur_p->app0 = 0;
+
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
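
The completion hunk above enforces two orderings: descriptor fields may be read only after the completion bit in app0 has been observed (the added rmb()), and app0 may be cleared, republishing the BD to the xmit path, only after every other field has been reset (the smp_mb()). A self-contained sketch using C11 acquire/release in place of the kernel barriers:

#include <stdatomic.h>
#include <stdio.h>

struct bd {
	_Atomic unsigned int app0;	/* completion / ownership flag */
	unsigned int len;		/* payload, owned by the flag holder */
};

static void consume(struct bd *bd)
{
	while (!atomic_load_explicit(&bd->app0, memory_order_acquire))
		;			/* acquire ~ the added rmb() */
	printf("len = %u\n", bd->len);	/* safe: read after the flag */
	bd->len = 0;
	/* release ~ smp_mb(): app0 clears only after len is reset */
	atomic_store_explicit(&bd->app0, 0, memory_order_release);
}

int main(void)
{
	/* single-threaded demo; a real consumer races with a producer */
	struct bd bd = { .len = 64 };

	atomic_store_explicit(&bd.app0, 1, memory_order_release);
	consume(&bd);
	return 0;
}
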
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
if (cur_p->app0)
return NETDEV_TX_BUSY;
+ /* Make sure to read next bd app0 after this one */
+ rmb();
+
tail++;
if (tail >= lp->tx_bd_num)
tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
smp_mb();
/* Space might have just been freed - check again */
- if (temac_check_tx_bd_space(lp, num_frag))
+ if (temac_check_tx_bd_space(lp, num_frag + 1))
return NETDEV_TX_BUSY;
netif_wake_queue(ndev);
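
The recheck after smp_mb() must be as strict as the gate at the top of temac_start_xmit(): a frame consumes one BD for the linear buffer plus one per fragment, so testing only num_frag can wake the queue straight into NETDEV_TX_BUSY. A toy ring-space check showing the mismatch (illustrative layout):

#include <stdbool.h>
#include <stdio.h>

#define RING 8

/* a slot is free when its flag (app0) is clear */
static bool check_space(const unsigned int *app0, int tail, int needed)
{
	for (int i = 0; i < needed; i++) {
		if (app0[(tail + i) % RING])
			return false;	/* would collide with a busy BD */
	}
	return true;
}

int main(void)
{
	unsigned int app0[RING] = { 0, 0, 0, 1, 0, 0, 0, 0 };
	int num_frag = 2;

	/* num_frag alone passes; num_frag + 1 correctly refuses */
	printf("loose check:  %d\n", check_space(app0, 1, num_frag));
	printf("strict check: %d\n", check_space(app0, 1, num_frag + 1));
	return 0;
}
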
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
- ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+ /* Mark last fragment with skb address, so it can be consumed
+ * in temac_start_xmit_done()
+ */
+ ptr_to_txbd((void *)skb, cur_p);
+
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= lp->tx_bd_num)
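
Moving ptr_to_txbd() to the EOP descriptor means the completion walk only reaches the skb pointer once every fragment's BD has completed, so the skb cannot be consumed while later fragments are still mapped for DMA. A sketch of that invariant (illustrative types):

#include <stdio.h>

struct bd {
	void *skb;	/* non-NULL only on the last BD of a frame */
};

static void complete_bds(struct bd *ring, int first, int count)
{
	for (int i = first; i < first + count; i++) {
		/* fragment i's DMA is finished by the time we get here */
		if (ring[i].skb)
			printf("freeing skb after BD %d (last of chain)\n", i);
	}
}

int main(void)
{
	struct bd ring[4] = { 0 };
	char skb;

	ring[2].skb = &skb;	/* EOP descriptor of a 3-BD frame */
	complete_bds(ring, 0, 3);
	return 0;
}
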
@@ -926,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
+ netif_stop_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}
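
The final hunk stops the queue proactively whenever fewer than MAX_SKB_FRAGS + 1 descriptors remain (MAX_SKB_FRAGS is commonly 17), so the next maximally fragmented skb never has to fail; the completion path is then responsible for waking the queue once worst-case space is back. A stubbed sketch of the stop/wake pairing:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS	17	/* common kernel value; illustrative here */

static bool queue_stopped;
static int free_bds = 12;	/* pretend ring occupancy */

static void tx_done_one(void)	/* completion path */
{
	free_bds++;
	if (queue_stopped && free_bds >= MAX_SKB_FRAGS + 1) {
		queue_stopped = false;
		puts("queue woken");
	}
}

static void xmit_one(int bds)
{
	free_bds -= bds;
	if (free_bds < MAX_SKB_FRAGS + 1) {
		queue_stopped = true;	/* never fail the *next* xmit */
		puts("queue stopped");
	}
}

int main(void)
{
	xmit_one(3);			/* 9 BDs left < 18: stop */
	for (int i = 0; i < 9; i++)
		tx_done_one();		/* 18 BDs free: wake */
	return 0;
}
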