Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 4
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 4
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 2
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 2
-rw-r--r--  drivers/net/ethernet/amd/7990.h | 2
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 19
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 35
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 25
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 8
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 120
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 25
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 261
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 216
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 8
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 24
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 40
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 61
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_dev.h | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 498
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 29
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 59
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 42
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 32
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 26
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 139
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h | 155
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 275
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.h | 7
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 3
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 52
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c | 86
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c | 12
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 7
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 17
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_tc.c | 6
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 12
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 16
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 3
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.c | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 14
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 17
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 1089
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 162
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 28
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 136
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.h | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 42
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 34
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 253
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 28
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 162
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.h | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 167
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 214
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 27
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 25
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.c | 82
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.h | 17
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 11
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 8
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 146
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 12
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 12
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 26
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 3
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 210
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 13
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c | 626
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.h | 29
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 1086
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.h | 74
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 4
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 1
197 files changed, 6882 insertions, 1732 deletions
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 1b19385ad8a9..865892c1f23f 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -714,11 +714,11 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
* gather additional information that normally would
* come from the eeprom, like MAC Address
*/
- adapter->has_eeprom = 0;
+ adapter->has_eeprom = false;
return -EIO;
}
}
- adapter->has_eeprom = 1;
+ adapter->has_eeprom = true;
/* Read the EEPROM for information regarding LED behavior. Refer to
* et131x_xcvr_init() for its use.
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 18d3b4340bd4..b3b8a8010142 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -417,7 +417,7 @@ static void emac_timeout(struct net_device *dev, unsigned int txqueue)
/* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long channel;
@@ -425,7 +425,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
channel = db->tx_fifo_stat & 3;
if (channel == 3)
- return 1;
+ return NETDEV_TX_BUSY;
channel = (channel == 1 ? 1 : 0);
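
The same conversion appears in several drivers in this series: .ndo_start_xmit implementations must return netdev_tx_t rather than a bare int, so the core can distinguish NETDEV_TX_OK (skb consumed) from NETDEV_TX_BUSY (requeue and retry). A minimal sketch of the convention, using made-up foo_* names:

	/* Illustration only; foo_priv, foo_queue_full and foo_hw_xmit are
	 * hypothetical stand-ins for a driver's own queue-state and DMA
	 * helpers.
	 */
	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (foo_queue_full(priv))
			return NETDEV_TX_BUSY;	/* core keeps skb and retries */

		foo_hw_xmit(priv, skb);		/* hand the skb to hardware */
		return NETDEV_TX_OK;		/* skb now owned by the driver */
	}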
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1671c1f36691..907125abef2c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -554,7 +554,7 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
* physically contiguous fragment starting at
* skb->data, for length of skb_headlen(skb).
*/
-static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
unsigned int txsize = priv->tx_ring_size;
@@ -562,7 +562,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int nopaged_len = skb_headlen(skb);
- enum netdev_tx ret = NETDEV_TX_OK;
+ netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
spin_lock_bh(&priv->tx_lock);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index bd278c4721c6..7df67bf09b93 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -69,7 +69,7 @@
* 16kB.
*/
#if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
+#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
#else
#define ENA_PAGE_SIZE PAGE_SIZE
#endif
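
For context (an aside, not part of the patch): _AC() comes from <uapi/linux/const.h> and appends an integer-constant suffix only when compiling C, so ENA_PAGE_SIZE becomes an unsigned long in C code while the header stays usable from assembly. Simplified:

	/* <uapi/linux/const.h>, roughly: */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* asm sees: 0x00004000 */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C sees: 0x00004000UL */
	#endif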
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index cf3562e82ca9..50fb66369415 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -536,7 +536,7 @@ void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 8266b3c1fefc..e53551daeea1 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -241,7 +241,7 @@ struct lance_private {
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 4e36122609a3..961796abab35 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -156,7 +156,7 @@ struct lance_memory {
struct lance_init_block init;
struct lance_tx_head tx_head[TX_RING_SIZE];
struct lance_rx_head rx_head[RX_RING_SIZE];
- char packet_area[0]; /* packet data follow after the
+ char packet_area[]; /* packet data follow after the
* init block and the ring
* descriptors and are located
* at runtime */
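
The [0] to [] change converts a GNU zero-length array into a C99 flexible array member, which compilers and FORTIFY_SOURCE can bounds-check. A generic sketch of the idiom with hypothetical names (atarilance itself lays the struct over board memory rather than allocating it):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct foo_memory {
		int n_items;
		char payload[];		/* flexible array member, must be last */
	};

	/* struct_size() adds the trailing payload bytes without overflow: */
	struct foo_memory *m = kzalloc(struct_size(m, payload, len), GFP_KERNEL);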
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 53620ba6d7a6..52ad9433cabc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_common.h: Basic includes for all files in project. */
@@ -53,14 +54,14 @@
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
+#define AQ_NIC_RATE_2G5 BIT(3)
#define AQ_NIC_RATE_1G BIT(4)
#define AQ_NIC_RATE_100M BIT(5)
#define AQ_NIC_RATE_10M BIT(6)
#define AQ_NIC_RATE_EEE_10G BIT(7)
#define AQ_NIC_RATE_EEE_5G BIT(8)
-#define AQ_NIC_RATE_EEE_2GS BIT(9)
+#define AQ_NIC_RATE_EEE_2G5 BIT(9)
#define AQ_NIC_RATE_EEE_1G BIT(10)
#define AQ_NIC_RATE_EEE_100M BIT(11)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 0c9dd8edc062..86fc77d85fda 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -605,7 +605,7 @@ static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
if (speed & AQ_NIC_RATE_EEE_10G)
rate |= SUPPORTED_10000baseT_Full;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= SUPPORTED_2500baseX_Full;
if (speed & AQ_NIC_RATE_EEE_1G)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index f97b073efd8e..1c6d12deb47a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -271,6 +272,14 @@ exit:
return err;
}
+static bool aq_nic_is_valid_ether_addr(const u8 *addr)
+{
+ /* Some engineering samples of Aquantia NICs are provisioned with a
+ * partially populated MAC, which is still invalid.
+ */
+ return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
+}
+
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -295,6 +304,12 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+ if (!is_valid_ether_addr(self->ndev->dev_addr) ||
+ !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
+
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
@@ -894,7 +909,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
@@ -937,7 +952,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
5000baseT_Full);
- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
2500baseT_Full);
@@ -996,7 +1011,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
break;
case SPEED_2500:
- rate = AQ_NIC_RATE_2GS;
+ rate = AQ_NIC_RATE_2G5;
break;
case SPEED_5000:
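
The aq_nic.c hunks above centralize a random-MAC fallback that the firmware-specific get_mac_permanent() helpers used to open-code (the fw2x and A2 hunks below delete those copies). The combined check, restated as a sketch of the pattern:

	#include <linux/etherdevice.h>

	/* is_valid_ether_addr() rejects zero/multicast addresses; the extra
	 * driver check also rejects 00:00:00:xx:yy:zz, the partially
	 * provisioned form seen on some engineering samples.
	 */
	if (!is_valid_ether_addr(ndev->dev_addr) ||
	    !aq_nic_is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);	/* locally administered, unicast */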
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index ce46cdbc69e6..d10fff8a8c71 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -65,7 +65,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
{ AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
- { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
{ AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
{ AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
{ AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index eee265b4415a..1b0670a8ae33 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
@@ -47,7 +48,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -57,7 +58,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -66,7 +67,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -74,7 +75,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -267,8 +268,7 @@ static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
- 0x00010000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index cbb7a00d61b4..fa3cd7e9954b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -59,7 +60,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -69,7 +70,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -78,7 +79,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -86,7 +87,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -215,8 +216,8 @@ err_exit:
return err;
}
-int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
- struct aq_rss_parameters *rss_params)
+static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
{
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
u8 *indirection_table = rss_params->indirection_table;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index f5091d79ab43..b855459272ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
@@ -35,8 +36,6 @@ extern const struct aq_hw_ops hw_atl_ops_b0;
int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
-int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
- struct aq_rss_parameters *rss_params);
int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1100d40a0302..73c0f41df8d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -687,7 +688,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
link_status->mbps = 5000U;
break;
- case HAL_ATLANTIC_RATE_2GS:
+ case HAL_ATLANTIC_RATE_2G5:
link_status->mbps = 2500U;
break;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 99c1b6644ec3..0b4b54fc1de0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -418,7 +419,7 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_10G BIT(0)
#define HAL_ATLANTIC_RATE_5G BIT(1)
#define HAL_ATLANTIC_RATE_5GSR BIT(2)
-#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_2G5 BIT(3)
#define HAL_ATLANTIC_RATE_1G BIT(4)
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 1ad10cc14918..eeedd8c90067 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -134,7 +135,7 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5GSR)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_2GS)
+ if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
if (speed & AQ_NIC_RATE_1G)
@@ -155,7 +156,7 @@ static u32 fw2x_to_eee_mask(u32 speed)
if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
rate |= AQ_NIC_RATE_EEE_5G;
if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
- rate |= AQ_NIC_RATE_EEE_2GS;
+ rate |= AQ_NIC_RATE_EEE_2G5;
if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
rate |= AQ_NIC_RATE_EEE_1G;
@@ -170,7 +171,7 @@ static u32 eee_mask_to_fw2x(u32 speed)
rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
if (speed & AQ_NIC_RATE_EEE_5G)
rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
@@ -282,8 +283,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
u32 mac_addr[2] = { 0 };
int err = 0;
- u32 h = 0U;
- u32 l = 0U;
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -298,26 +297,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
ether_addr_copy(mac, (u8 *)mac_addr);
- if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
- unsigned int rnd = 0;
-
- get_random_bytes(&rnd, sizeof(unsigned int));
-
- l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
- h = 0x8001300EU;
-
- mac[5] = (u8)(0xFFU & l);
- l >>= 8;
- mac[4] = (u8)(0xFFU & l);
- l >>= 8;
- mac[3] = (u8)(0xFFU & l);
- l >>= 8;
- mac[2] = (u8)(0xFFU & l);
- mac[1] = (u8)(0xFFU & h);
- h >>= 8;
- mac[0] = (u8)(0xFFU & h);
- }
-
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 04d194f754fa..6f2b33ae3d06 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -60,7 +60,7 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M |
AQ_NIC_RATE_10M,
@@ -172,7 +172,7 @@ static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;)
hw_atl2_new_rpf_rss_redir_set(self, 0, i, indirection_table[i]);
- return hw_atl_b0_hw_rss_set(self, rss_params);
+ return aq_hw_err_from_flags(self);
}
static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
index 85ccc9a011a0..f3766780e975 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
@@ -75,14 +75,6 @@ int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
u32 rbl_request;
int err;
- err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
- rbl_status,
- ((rbl_status & AQ_A2_BOOT_STARTED) &&
- (rbl_status != 0xFFFFFFFFu)),
- 10, 500000);
- if (err)
- aq_pr_trace("Boot code probably hanged, reboot anyway");
-
hw_atl2_mif_host_req_int_clr(self, 0x01);
rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
#ifdef AQ_CFG_FAST_START
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
index 2317dd8459d0..b66fa346581c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -103,7 +103,7 @@ struct sleep_proxy_s {
u32 crc32;
} wake_up_pattern[8];
- struct __attribute__ ((__packed__)) {
+ struct __packed {
u8 arp_responder:1;
u8 echo_responder:1;
u8 igmp_client:1;
@@ -119,7 +119,7 @@ struct sleep_proxy_s {
u32 ipv4_offload_addr[8];
u32 reserved[8];
- struct __attribute__ ((__packed__)) {
+ struct __packed {
u8 ns_responder:1;
u8 echo_responder:1;
u8 mld_client:1;
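
Stylistic note: __packed is the kernel's macro (from <linux/compiler_attributes.h>) for __attribute__((__packed__)); the two spellings are equivalent, and the macro form is preferred. With a hypothetical struct name:

	struct wol_bitmap  { u8 arp_responder:1; } __attribute__((__packed__));
	struct wol_bitmap2 { u8 arp_responder:1; } __packed;	/* same layout */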
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index f5fb4b11f51a..0ffc33bd67d0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -6,6 +6,7 @@
#include <linux/iopoll.h>
#include "aq_hw.h"
+#include "aq_hw_utils.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
@@ -129,7 +130,7 @@ static void a2_link_speed_mask2fw(u32 speed,
link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
- link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2GS);
+ link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
link_options->rate_N2P5G = link_options->rate_2P5G;
link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
@@ -212,28 +213,6 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
hw_atl2_shared_buffer_get(self, mac_address, mac_address);
ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);
- if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
- unsigned int rnd = 0;
- u32 h;
- u32 l;
-
- get_random_bytes(&rnd, sizeof(unsigned int));
-
- l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
- h = 0x8001300EU;
-
- mac[5] = (u8)(0xFFU & l);
- l >>= 8;
- mac[4] = (u8)(0xFFU & l);
- l >>= 8;
- mac[3] = (u8)(0xFFU & l);
- l >>= 8;
- mac[2] = (u8)(0xFFU & l);
- mac[1] = (u8)(0xFFU & h);
- h >>= 8;
- mac[0] = (u8)(0xFFU & h);
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index fbe9d88b13c7..36c7cf05630a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -846,8 +846,7 @@ static int get_ingress_sakey_record(struct aq_hw_s *hw,
rec->key[7] = packed_record[14];
rec->key[7] |= packed_record[15] << 16;
- rec->key_len = (rec->key_len & 0xFFFFFFFC) |
- (packed_record[16] & 0x3);
+ rec->key_len = packed_record[16] & 0x3;
return 0;
}
@@ -1158,6 +1157,7 @@ static int set_egress_ctlf_record(struct aq_hw_s *hw,
packed_record[0] = rec->sa_da[0] & 0xFFFF;
packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
packed_record[2] = rec->sa_da[1] & 0xFFFF;
packed_record[3] = rec->eth_type & 0xFFFF;
@@ -1552,7 +1552,7 @@ static int set_egress_sc_record(struct aq_hw_s *hw,
packed_record[5] |= (rec->sak_len & 0x3) << 4;
- packed_record[7] |= (rec->valid & 0x1) << 15;
+ packed_record[7] = (rec->valid & 0x1) << 15;
return set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSCRECORD + table_index);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 00bd7bd55794..decab9a8e4a8 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1186,7 +1186,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
u32 mac, txq, rxq;
- hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
+ hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
hw->mac_speed = adapter->link_speed == SPEED_1000 ?
atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
@@ -2449,12 +2449,6 @@ static int atl1c_resume(struct device *dev)
atl1c_reset_mac(&adapter->hw);
atl1c_phy_init(&adapter->hw);
-#if 0
- AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
- pm_data &= ~PM_CTRLSTAT_PME_EN;
- AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
-#endif
-
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 271e7034fa70..b35fcfcd692d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1042,7 +1042,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional 40 bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct tx_packet_desc) * tpd_ring->count
+ sizeof(struct rx_free_desc) * rfd_ring->count
+ sizeof(struct rx_return_desc) * rrd_ring->count
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index a5d1a6cb9ce3..6795b6d95f54 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
const u8 *mac_addr;
bgmac = bgmac_alloc(&pdev->dev);
@@ -206,16 +207,21 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base =
- devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+ bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base =
- devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
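
The bgmac change flips idm_base and nicpm_base from required to optional regions: devm_platform_ioremap_resource_byname() fails hard when the resource is missing, whereas probing with platform_get_resource_byname() first lets the driver simply skip absent regions. The pattern in isolation, as a sketch (optional_ioremap is a made-up helper name):

	#include <linux/io.h>
	#include <linux/platform_device.h>

	static void __iomem *optional_ioremap(struct platform_device *pdev,
					      const char *name)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		if (!res)
			return NULL;	/* region absent: treat as optional */

		/* present but unmappable is still a hard error (ERR_PTR) */
		return devm_ioremap_resource(&pdev->dev, res);
	}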
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2c6ba046d2a8..17ae6df90723 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1145,7 +1145,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
break;
}
}
- if (false == pg_found) {
+ if (!pg_found) {
data[help_data->num_of_pg].pg = add_pg;
data[help_data->num_of_pg].pg_priority =
(1 << ttp[add_traf_type]);
@@ -1155,7 +1155,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
}
DP(BNX2X_MSG_DCB,
"add_traf_type %d pg_found %s num_of_pg %d\n",
- add_traf_type, (false == pg_found) ? "NO" : "YES",
+ add_traf_type, !pg_found ? "NO" : "YES",
help_data->num_of_pg);
}
}
@@ -1544,8 +1544,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
entry = 0;
- if (i == (num_of_pri-1) &&
- false == b_found_strict)
+ if (i == (num_of_pri-1) && !b_found_strict)
/* last entry will be handled separately
* If no priority is strict than last
* entry goes to last queue.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5097a44686b3..b4476f44e386 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -331,27 +331,6 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
-static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *obj,
- atomic_t *counter)
-{
- struct list_head *pos;
- int read_lock;
- int cnt = 0;
-
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
- if (read_lock)
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
-
- list_for_each(pos, &obj->head)
- cnt++;
-
- if (!read_lock)
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
-
- atomic_set(counter, cnt);
-}
-
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, bool drv_only, int type)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fead64f1ad90..f86b6217f829 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1766,7 +1766,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
netdev_warn(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1849,7 +1849,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
}
}
@@ -5045,8 +5045,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -5356,9 +5355,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + 0x10000;
+ db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
else
- db->doorbell = bp->bar1 + 0x4000;
+ db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -6365,6 +6364,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -6413,6 +6413,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (!bp->max_mtu)
bp->max_mtu = BNXT_MAX_MTU;
+ if (bp->db_size)
+ goto func_qcfg_exit;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ min_db_offset = DB_PF_OFFSET_P5;
+ else
+ min_db_offset = DB_VF_OFFSET_P5;
+ }
+ bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
+ bp->db_size <= min_db_offset)
+ bp->db_size = pci_resource_len(bp->pdev, 2);
+
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6434,23 +6449,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (!rc) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
- int i;
+ int i, tqm_rings;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
}
- ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
@@ -6483,6 +6488,20 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+
+ tqm_rings = ctx->tqm_fp_rings_count + 1;
+ ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+ bp->ctx = ctx;
} else {
rc = 0;
}
@@ -6642,7 +6661,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
int rc;
if (!mem_size)
- return 0;
+ return -EINVAL;
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -6735,7 +6754,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
return;
if (ctx->tqm_mem[0]) {
- for (i = 0; i < bp->max_q + 1; i++)
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
@@ -6756,6 +6775,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 entries_sp, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
@@ -6845,14 +6865,17 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- entries = ctx->qp_max_l2_entries + extra_qps;
+ min = ctx->tqm_min_entries_per_ring;
+ entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+ entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
- ctx->tqm_max_entries_per_ring);
- for (i = 0; i < bp->max_q + 1; i++) {
+ entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = entries;
- mem_size = ctx->tqm_entry_size * entries;
+ ctx_pg->entries = i ? entries : entries_sp;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
@@ -9780,6 +9803,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_features_t vlan_features;
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
@@ -9796,12 +9820,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX)) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
- else
+ else if (vlan_features)
features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX;
}
@@ -10262,7 +10288,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->missed_irqs++;
+ cpr->sw_stats.cmn.missed_irqs++;
}
}
}
@@ -10891,6 +10917,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->dev = dev;
bp->pdev = pdev;
+ /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
+ * determines the BAR size.
+ */
bp->bar0 = pci_ioremap_bar(pdev, 0);
if (!bp->bar0) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
@@ -10898,13 +10927,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- bp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!bp->bar1) {
- dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
- rc = -ENOMEM;
- goto init_err_release;
- }
-
bp->bar2 = pci_ioremap_bar(pdev, 4);
if (!bp->bar2) {
dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
@@ -11826,6 +11848,16 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
return 0;
}
+static int bnxt_map_db_bar(struct bnxt *bp)
+{
+ if (!bp->db_size)
+ return -ENODEV;
+ bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
+ if (!bp->bar1)
+ return -ENOMEM;
+ return 0;
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -11886,6 +11918,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_map_db_bar(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto init_err_pci_clean;
+ }
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -12212,12 +12251,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
bnxt_ulp_start(bp, err);
}
- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
- dev_close(netdev);
+ if (result != PCI_ERS_RESULT_RECOVERED) {
+ if (netif_running(netdev))
+ dev_close(netdev);
+ pci_disable_device(pdev);
+ }
rtnl_unlock();
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
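
Taken together, the bnxt.c hunks defer mapping BAR 2 until firmware has reported the usable doorbell size, so only that window is ioremapped rather than the whole BAR. A summary of the new ordering, paraphrased from the hunks above:

	/* bnxt_init_one(), after this patch:
	 *   1. bnxt_init_board()   - maps BAR 0 and BAR 4 only
	 *   2. firmware init path  - HWRM_FUNC_QCFG fills bp->db_size,
	 *                            clamped to the BAR length and to the
	 *                            P5 PF/VF minimum doorbell offset
	 *   3. bnxt_map_db_bar()   - pci_iomap(pdev, 2, bp->db_size), which
	 *                            maps only the first db_size bytes
	 */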
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f2caa2756f5b..c04ac4a36005 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -537,6 +537,9 @@ struct nqe_cn {
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
+#define DB_PF_OFFSET_P5 0x10000
+#define DB_VF_OFFSET_P5 0x4000
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -907,6 +910,20 @@ struct bnxt_rx_ring_info {
struct page_pool *page_pool;
};
+struct bnxt_rx_sw_stats {
+ u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
+};
+
+struct bnxt_cmn_sw_stats {
+ u64 missed_irqs;
+};
+
+struct bnxt_sw_stats {
+ struct bnxt_rx_sw_stats rx;
+ struct bnxt_cmn_sw_stats cmn;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -934,9 +951,8 @@ struct bnxt_cp_ring_info {
struct ctx_hw_stats *hw_stats;
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
- u64 rx_l4_csum_errors;
- u64 rx_buf_errors;
- u64 missed_irqs;
+
+ struct bnxt_sw_stats sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1066,7 +1082,6 @@ struct bnxt_vf_info {
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
#define BNXT_VF_TRUST 0x10
- u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
void *hwrm_cmd_req_addr;
@@ -1357,6 +1372,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
+ u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1816,6 +1832,7 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
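
Grouping the loose counters into bnxt_sw_stats keeps each category a contiguous run of u64s, which is what lets the ethtool code in the next file walk a sub-struct as a flat array. Mirroring the usage in bnxt_get_ethtool_stats() below:

	/* Each sub-struct of bnxt_sw_stats is all-u64, so it can be copied
	 * generically into the ethtool buffer:
	 */
	u64 *sw = (u64 *)&cpr->sw_stats.rx;

	for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
		buf[j] = sw[k];	/* rx_l4_csum_errors, then rx_buf_errors */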
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 95f893f2a74d..d5c8bd49383a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define BNXT_NVM_CFG_VER_BITS 24
#define BNXT_NVM_CFG_VER_BYTES 4
-#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MAX 512
#define BNXT_MSIX_VEC_MIN_MAX 128
enum bnxt_nvm_dir_type {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 34046a6286e8..dd0c3f227009 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
return rc;
}
-static const char * const bnxt_ring_stats_str[] = {
+static const char * const bnxt_ring_rx_stats_str[] = {
"rx_ucast_packets",
"rx_mcast_packets",
"rx_bcast_packets",
@@ -146,6 +146,9 @@ static const char * const bnxt_ring_stats_str[] = {
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
@@ -171,9 +174,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_errors",
};
-static const char * const bnxt_ring_sw_stats_str[] = {
+static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
"rx_buf_errors",
+};
+
+static const char * const bnxt_cmn_sw_stats_str[] = {
"missed_irqs",
};
@@ -303,6 +309,11 @@ static struct {
{0, "tx_total_discard_pkts"},
};
+#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
+#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
+#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
+#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -482,12 +493,21 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
- int num_stats;
+ int rx, tx, cmn;
+ bool sh = false;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
- num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
- ARRAY_SIZE(bnxt_ring_sw_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
- return num_stats * bp->cp_nr_rings;
+ rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
+ bnxt_get_num_tpa_ring_stats(bp);
+ tx = NUM_RING_TX_HW_STATS;
+ cmn = NUM_RING_CMN_SW_STATS;
+ if (sh)
+ return (rx + tx + cmn) * bp->cp_nr_rings;
+ else
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
@@ -528,13 +548,29 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
}
}
+static bool is_rx_ring(struct bnxt *bp, int ring_num)
+{
+ return ring_num < bp->rx_nr_rings;
+}
+
+static bool is_tx_ring(struct bnxt *bp, int ring_num)
+{
+ int tx_base = 0;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ tx_base = bp->rx_nr_rings;
+
+ if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
+ return true;
+ return false;
+}
+
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
+ u32 tpa_stats;
if (!bp->bnapi) {
j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
@@ -544,17 +580,42 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
+ tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
__le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw;
int k;
- for (k = 0; k < stat_fields; j++, k++)
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (is_tx_ring(bp, i)) {
+ k = NUM_RING_RX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (!tpa_stats || !is_rx_ring(bp, i))
+ goto skip_tpa_ring_stats;
+
+ k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
+ tpa_stats; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
- buf[j++] = cpr->rx_l4_csum_errors;
- buf[j++] = cpr->rx_buf_errors;
- buf[j++] = cpr->missed_irqs;
+
+skip_tpa_ring_stats:
+ sw = (u64 *)&cpr->sw_stats.rx;
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
+ buf[j] = sw[k];
+ }
+
+ sw = (u64 *)&cpr->sw_stats.cmn;
+ for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
+ buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -632,31 +693,48 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- num_str = ARRAY_SIZE(bnxt_ring_stats_str);
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_rx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
- if (!BNXT_SUPPORTS_TPA(bp))
+ if (is_tx_ring(bp, i)) {
+ num_str = NUM_RING_TX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_tx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = bnxt_get_num_tpa_ring_stats(bp);
+ if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
- if (bp->max_tpa_v2) {
- num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ if (bp->max_tpa_v2)
str = bnxt_ring_tpa2_stats_str;
- } else {
- num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ else
str = bnxt_ring_tpa_stats_str;
- }
+
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i, str[j]);
buf += ETH_GSTRING_LEN;
}
skip_tpa_stats:
- num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_SW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_rx_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = NUM_RING_CMN_SW_STATS;
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
- bnxt_ring_sw_stats_str[j]);
+ bnxt_cmn_sw_stats_str[j]);
buf += ETH_GSTRING_LEN;
}
}
@@ -1749,8 +1827,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
return rc;
}
-static int bnxt_firmware_reset(struct net_device *dev,
- u16 dir_type)
+static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct hwrm_fw_reset_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -1758,48 +1836,77 @@ static int bnxt_firmware_reset(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.embedded_proc_type = proc_type;
+ req.selfrst_status = self_reset;
+ req.flags = flags;
+
+ if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
+ }
+ return rc;
+}
+
+static int bnxt_firmware_reset(struct net_device *dev,
+ enum bnxt_nvm_directory_type dir_type)
+{
+ u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
+ u8 proc_type, flags = 0;
+
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
switch (dir_type) {
case BNX_DIR_TYPE_CHIMP_PATCH:
case BNX_DIR_TYPE_BOOTCODE:
case BNX_DIR_TYPE_BOOTCODE_2:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
/* Self-reset ChiMP upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
/* Self-reset APE upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
- req.embedded_proc_type =
- FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
break;
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
- break;
- case BNXT_FW_RESET_CHIP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
- if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
- req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
- break;
- case BNXT_FW_RESET_AP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
default:
return -EINVAL;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == -EACCES)
- bnxt_print_admin_err(bp);
- return rc;
+ return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
+}
+
+static int bnxt_firmware_reset_chip(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 flags = 0;
+
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+
+ return bnxt_hwrm_firmware_reset(dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ flags);
+}
+
+static int bnxt_firmware_reset_ap(struct net_device *dev)
+{
+ return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
+ 0);
}
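The refactor above funnels every firmware reset through one sender parametrized by processor type, self-reset policy and flags; the AP case uses the silent send variant, presumably because not all firmware supports it and the failure is reported by the caller instead. A rough sketch of the resulting call shape, with names and constants that are illustrative stand-ins rather than the HWRM API:

enum proc_type { PROC_BOOT, PROC_MGMT, PROC_NETCTRL, PROC_ROCE,
		 PROC_CHIP, PROC_AP };
enum self_reset { SELFRST_NONE, SELFRST_ASAP, SELFRST_PCIERST };

static int fw_reset(enum proc_type p, enum self_reset s, unsigned int flags)
{
	/* would build a single HWRM_FW_RESET request from the three
	 * knobs; PROC_AP would take the silent send path
	 */
	(void)p; (void)s; (void)flags;
	return 0;
}

static int fw_reset_chip(int hot_reset_capable)
{
	/* graceful only when firmware advertises hot-reset support */
	return fw_reset(PROC_CHIP, SELFRST_ASAP, hot_reset_capable ? 1u : 0u);
}

static int fw_reset_ap(void)
{
	return fw_reset(PROC_AP, SELFRST_NONE, 0);
}

int main(void)
{
	return fw_reset_chip(1) | fw_reset_ap();
}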
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1988,9 +2095,9 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
rc, filename);
return rc;
}
- if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
- else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ else if (bnxt_dir_type_is_other_exec_format(dir_type))
rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
@@ -2377,7 +2484,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
}
/* Create or re-write an NVM item: */
- if (bnxt_dir_type_is_executable(type) == true)
+ if (bnxt_dir_type_is_executable(type))
return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
@@ -2975,7 +3082,11 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
+ bool reload = false;
+ u32 req = *flags;
+
+ if (!req)
+ return -EINVAL;
if (!BNXT_PF(bp)) {
netdev_err(dev, "Reset is not supported from a VF\n");
@@ -2989,33 +3100,37 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EBUSY;
}
- if (*flags == ETH_RESET_ALL) {
+ if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
/* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
-
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
- if (!rc) {
- netdev_info(dev, "Reset request successful.\n");
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
- netdev_info(dev, "Reload driver to complete reset\n");
- *flags = 0;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_chip(dev)) {
+ netdev_info(dev, "Firmware reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_CHIP;
+ }
+ } else if (req == BNXT_FW_RESET_CHIP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
- } else if (*flags == ETH_RESET_AP) {
- /* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
+ }
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
- if (!rc) {
- netdev_info(dev, "Reset Application Processor request successful.\n");
- *flags = 0;
+ if (req & BNXT_FW_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_ap(dev)) {
+ netdev_info(dev, "Reset application processor successful.\n");
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_AP;
+ }
+ } else if (req == BNXT_FW_RESET_AP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
- } else {
- rc = -EINVAL;
}
- return rc;
+ if (reload)
+ netdev_info(dev, "Reload driver to complete reset\n");
+
+ return 0;
}
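Note the new in/out contract of *flags above: each supported reset is attempted independently, bits for completed resets are cleared from the mask, and -EOPNOTSUPP is returned only when an unsupported reset was the sole request. A small sketch of a caller reading that contract back, with stand-in masks and a stubbed reset:

#include <stdio.h>

#define RST_AP   0x1u	/* stand-ins for the real ETH_RESET_* masks */
#define RST_CHIP 0x2u

static int reset_stub(unsigned int *flags)
{
	*flags &= ~RST_AP;	/* pretend only the AP reset succeeded */
	return 0;
}

int main(void)
{
	unsigned int requested = RST_AP | RST_CHIP, flags = requested;

	if (reset_stub(&flags))
		return 1;
	printf("completed: %#x, not performed: %#x\n",
	       requested & ~flags, flags);	/* 0x1, 0x2 */
	return 0;
}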
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3576d951727b..ce7585ff9e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -77,8 +77,12 @@ struct hwrm_dbg_cmn_output {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
-#define BNXT_FW_RESET_AP 0xfffe
-#define BNXT_FW_RESET_CHIP 0xffff
+#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT)
+#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \
+ ETH_RESET_DMA | ETH_RESET_FILTER | \
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
+ ETH_RESET_PHY | ETH_RESET_RAM) \
+ << ETH_RESET_SHARED_SHIFT)
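The old private magic values 0xfffe/0xffff are replaced by genuine ethtool component bits: the low 16 bits of an ethtool reset word request this-function-only resets, and the same bits shifted by ETH_RESET_SHARED_SHIFT request resets shared across functions. A quick check of the resulting masks; the ETH_RESET_* values below match the uapi header:

#include <stdio.h>

#define ETH_RESET_MGMT    0x01
#define ETH_RESET_IRQ     0x02
#define ETH_RESET_DMA     0x04
#define ETH_RESET_FILTER  0x08
#define ETH_RESET_OFFLOAD 0x10
#define ETH_RESET_MAC     0x20
#define ETH_RESET_PHY     0x40
#define ETH_RESET_RAM     0x80
#define ETH_RESET_AP      0x100
#define ETH_RESET_SHARED_SHIFT 16

int main(void)
{
	unsigned int chip = (ETH_RESET_MGMT | ETH_RESET_IRQ | ETH_RESET_DMA |
			     ETH_RESET_FILTER | ETH_RESET_OFFLOAD |
			     ETH_RESET_MAC | ETH_RESET_PHY | ETH_RESET_RAM)
			    << ETH_RESET_SHARED_SHIFT;
	unsigned int ap = ETH_RESET_AP << ETH_RESET_SHARED_SHIFT;

	printf("chip=%#x ap=%#x\n", chip, ap);	/* 0xff0000 0x1000000 */
	return 0;
}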
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7cf27dffadb5..7e9235c8d21e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2019 Broadcom Inc.
+ * Copyright (c) 2018-2020 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -207,6 +207,8 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_PORT_ECN_QSTATS 0xbaUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -220,6 +222,8 @@ struct cmd_nums {
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
#define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
#define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -233,6 +237,7 @@ struct cmd_nums {
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -331,6 +336,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -341,6 +347,31 @@ struct cmd_nums {
#define HWRM_MFG_OTP_CFG 0x207UL
#define HWRM_MFG_OTP_QCFG 0x208UL
#define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_ATTACH 0x2c7UL
+ #define HWRM_TF_SESSION_CLOSE 0x2c8UL
+ #define HWRM_TF_SESSION_QCFG 0x2c9UL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
+ #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
+ #define HWRM_TF_EXT_EM_OP 0x2ddUL
+ #define HWRM_TF_EXT_EM_CFG 0x2deUL
+ #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
+ #define HWRM_TF_TCAM_SET 0x2eeUL
+ #define HWRM_TF_TCAM_GET 0x2efUL
+ #define HWRM_TF_TCAM_MOVE 0x2f0UL
+ #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -356,6 +387,10 @@ struct cmd_nums {
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
#define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -429,8 +464,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 12
-#define HWRM_VERSION_STR "1.10.1.12"
+#define HWRM_VERSION_RSVD 33
+#define HWRM_VERSION_STR "1.10.1.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -482,6 +517,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -647,6 +683,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1089,7 +1126,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:640b/80B) */
+/* hwrm_func_qcaps_output (size:704b/88B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1126,6 +1163,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1146,7 +1187,12 @@ struct hwrm_func_qcaps_output {
__le32 max_flow_id;
__le32 max_hw_ring_grps;
__le16 max_sp_tx_rings;
- u8 unused_0;
+ u8 unused_0[2];
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ u8 unused_1[3];
u8 valid;
};
@@ -1161,7 +1207,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:704b/88B) */
+/* hwrm_func_qcfg_output (size:768b/96B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1267,7 +1313,11 @@ struct hwrm_func_qcfg_output {
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
- u8 unused_2[1];
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 unused_2[7];
u8 valid;
};
@@ -1420,9 +1470,10 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY
+ #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
u8 unused_0[5];
};
@@ -1456,6 +1507,53 @@ struct hwrm_func_qstats_output {
u8 valid;
};
+/* hwrm_func_qstats_ext_input (size:192b/24B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_ext_output (size:1472b/184B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
__le16 req_type;
@@ -1808,7 +1906,7 @@ struct hwrm_func_backing_store_qcaps_output {
u8 ctx_kind_initializer;
__le32 rsvd;
__le16 rsvd1;
- u8 rsvd2;
+ u8 tqm_fp_rings_count;
u8 valid;
};
@@ -2231,7 +2329,17 @@ struct hwrm_error_recovery_qcfg_output {
#define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
__le32 reset_reg_val[16];
u8 delay_after_reset[16];
- u8 unused_1[7];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
u8 valid;
};
@@ -2934,7 +3042,11 @@ struct hwrm_port_qstats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3058,7 +3170,11 @@ struct hwrm_port_qstats_ext_input {
__le16 port_id;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[2];
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3840,14 +3956,22 @@ struct hwrm_queue_pfcenable_qcfg_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
u8 unused_0[3];
u8 valid;
};
@@ -3860,14 +3984,22 @@ struct hwrm_queue_pfcenable_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
__le16 port_id;
u8 unused_0[2];
};
@@ -5287,7 +5419,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 target_id;
__le64 resp_addr;
__le16 ring_id;
- u8 unused_0[6];
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
@@ -7618,7 +7754,9 @@ struct hwrm_nvm_modify_input {
__le64 resp_addr;
__le64 host_src_addr;
__le16 dir_idx;
- u8 unused_0[2];
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
__le32 offset;
__le32 len;
u8 unused_1[4];
@@ -8027,4 +8165,18 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+};
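A short sketch of reading the register laid out above: the low 16 bits hold a status code whose ready value is 0x8000, and the bits above it are independent condition flags. User-space decode with abbreviated mask names:

#include <stdio.h>
#include <stdint.h>

#define CODE_MASK       0xffffu
#define CODE_READY      0x8000u
#define IMAGE_DEGRADED  0x10000u
#define RECOVERABLE     0x20000u

static void decode(uint32_t sts)
{
	printf("code=%#x ready=%d degraded=%d recoverable=%d\n",
	       sts & CODE_MASK, (sts & CODE_MASK) == CODE_READY,
	       !!(sts & IMAGE_DEGRADED), !!(sts & RECOVERABLE));
}

int main(void)
{
	decode(0x8000);		/* ready */
	decode(0x18000);	/* ready, but image degraded */
	return 0;
}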
+
#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 6ea3df6da18c..3a9a51f7063a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
if (old_setting == setting)
return 0;
- func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
/* TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
req.flags = cpu_to_le32(func_flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
- vf->func_flags = func_flags;
if (setting)
vf->flags |= BNXT_VF_SPOOFCHK;
else
@@ -228,7 +226,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
memcpy(vf->mac_addr, mac, ETH_ALEN);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -266,7 +263,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.dflt_vlan = cpu_to_le16(vlan_tag);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -305,7 +301,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(max_tx_rate);
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -477,7 +472,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
vf = &bp->pf.vf[vf_id];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
if (is_valid_ether_addr(vf->mac_addr)) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
@@ -651,7 +645,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
req.mtu = cpu_to_le16(mtu);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 4a316c4b3fa8..8c8368c2f335 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -104,7 +104,13 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- ent[i].db_offset = (idx + i) * 0x80;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ent[i].db_offset = DB_PF_OFFSET_P5;
+ if (BNXT_VF(bp))
+ ent[i].db_offset = DB_VF_OFFSET_P5;
+ } else {
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
}
}
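On P5-class chips the doorbell offset handed to ULPs is a fixed per-function constant rather than the legacy 0x80 stride per MSI-X index. A sketch of the selection logic; the two P5 constants are assumed stand-ins for DB_PF_OFFSET_P5/DB_VF_OFFSET_P5:

#include <stdio.h>
#include <stdbool.h>

#define DB_PF_P5 0x10000	/* assumed values, for illustration */
#define DB_VF_P5 0x4000

static unsigned int db_offset(bool p5, bool vf, int idx)
{
	if (p5)
		return vf ? DB_VF_P5 : DB_PF_P5;
	return idx * 0x80;	/* legacy per-vector stride */
}

int main(void)
{
	printf("%#x %#x %#x\n", db_offset(false, false, 3),
	       db_offset(true, false, 3), db_offset(true, true, 3));
	return 0;
}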
@@ -475,6 +481,8 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
return bp->edev;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 9895406b9830..6b4d2556a6df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -67,6 +67,14 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
};
struct bnxt_en_ops {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 61ab7d21f6bd..c5cca63b8571 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1918,7 +1918,6 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
atomic_dec(&cp->iscsi_conn);
- ret = 0;
goto done;
}
ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff98a82b7bc4..7a3b22b35238 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10797,17 +10797,15 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
+ u32 off, len = TG3_OCIR_LEN;
int i;
- for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
- u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
-
+ for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
- off += len;
if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
!(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
- memset(ocir, 0, TG3_OCIR_LEN);
+ memset(ocir, 0, len);
}
}
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 53b50c24d9c9..2c4c12b03502 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -35,8 +35,8 @@ config MACB
config MACB_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on MACB
+ depends on PTP_1588_CLOCK
default y
- imply PTP_1588_CLOCK
---help---
Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a0e8c5bbabc0..36290a8e2a84 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int status;
status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0)
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
goto mdio_pm_exit;
+ }
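The pattern in this hunk and the two that follow reflects the pm_runtime reference contract: pm_runtime_get_sync() raises the device usage count even when it fails, so every error path must drop that reference with pm_runtime_put_noidle(). A toy counter model of why the put is needed:

#include <stdio.h>

static int usage;

static int get_sync(int fail)
{
	usage++;		/* the count is taken unconditionally */
	return fail ? -1 : 0;
}

static void put_noidle(void)
{
	usage--;
}

int main(void)
{
	if (get_sync(1) < 0)
		put_noidle();	/* without this, usage leaks at 1 */
	printf("usage=%d\n", usage);	/* 0 */
	return 0;
}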
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int status;
status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0)
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
goto mdio_pm_exit;
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -3816,8 +3820,10 @@ static int at91ether_open(struct net_device *dev)
int ret;
ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&lp->pdev->dev);
return ret;
+ }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4172,15 +4178,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
static int fu540_c000_init(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
-
- mgmt->reg = ioremap(res->start, resource_size(res));
- if (!mgmt->reg)
- return -ENOMEM;
+ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mgmt->reg))
+ return PTR_ERR(mgmt->reg);
return macb_init(pdev);
}
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 6a700d34019e..4520e7ee00fe 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -54,7 +54,7 @@ config THUNDER_NIC_RGX
config CAVIUM_PTP
tristate "Cavium PTP coprocessor as PTP clock"
depends on 64BIT && PCI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
---help---
This driver adds support for the Precision Time Protocol Clocks and
Timestamping coprocessor (PTP) found on Cavium processors.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index d7e805749a5b..e40c64b79f66 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -782,7 +782,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
if ((work_done < budget && tx_done) ||
(iq && iq->pkt_in_done >= MAX_REG_CNT) ||
(droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
napi_complete_done(napi, work_done);
octeon_enable_irq(droq->oct_dev, droq->q_no);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e46a14f44a6f..30d25a37fc3b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -466,8 +466,6 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
-#include "t4fw_api.h"
-
#define FW_VERSION(chip) ( \
FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f5dd34db4b54..6516c45864b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2207,6 +2207,9 @@ static void ethofld_hard_xmit(struct net_device *dev,
if (unlikely(skip_eotx_wr)) {
start = (u64 *)wr;
eosw_txq->state = next_state;
+ eosw_txq->cred -= wrlen16;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
goto write_wr_headers;
}
@@ -2365,6 +2368,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
return cxgb4_eth_xmit(skb, dev);
}
+static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
+{
+ int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+ int pidx = eosw_txq->pidx;
+ struct sk_buff *skb;
+
+ if (!pktcount)
+ return;
+
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+
+ while (pktcount--) {
+ pidx--;
+ if (pidx < 0)
+ pidx += eosw_txq->ndesc;
+
+ skb = eosw_txq->desc[pidx].skb;
+ if (skb) {
+ dev_consume_skb_any(skb);
+ eosw_txq->desc[pidx].skb = NULL;
+ eosw_txq->inuse--;
+ }
+ }
+
+ eosw_txq->pidx = eosw_txq->last_pidx + 1;
+}
+
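The flush above walks the software ring backwards from the producer index to the last completed index, with all index arithmetic done modulo the ring size. The wraparound math in isolation, with toy values:

#include <stdio.h>

int main(void)
{
	int ndesc = 8, last_pidx = 6, pidx = 2;	/* wrapped producer */
	int pktcount = pidx - last_pidx;

	if (pktcount < 0)
		pktcount += ndesc;	/* 2 - 6 + 8 = 4 pending entries */

	while (pktcount--) {
		if (--pidx < 0)
			pidx += ndesc;
		printf("free slot %d\n", pidx);	/* 1, 0, 7, 6 */
	}
	return 0;
}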
/**
* cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
 * @dev: netdevice
@@ -2440,9 +2471,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
FW_FLOWC_MNEM_EOSTATE_CLOSING :
FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
- eosw_txq->cred -= len16;
- eosw_txq->ncompl++;
- eosw_txq->last_compl = 0;
+ /* Free up any pending skbs to ensure there's room for
+ * termination FLOWC.
+ */
+ if (tc == FW_SCHED_CLS_NONE)
+ eosw_txq_flush_pending_skbs(eosw_txq);
ret = eosw_txq_enqueue(eosw_txq, skb);
if (ret) {
@@ -2695,6 +2728,7 @@ static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
* is ever running at a time ...
*/
static void service_ofldq(struct sge_uld_txq *q)
+ __must_hold(&q->sendq.lock)
{
u64 *pos, *before, *end;
int credits;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9cc3541a7e1c..cec865a97464 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2480,7 +2480,7 @@ static int setup_debugfs(struct adapter *adapter)
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
debugfs_create_file(debugfs_files[i].name,
debugfs_files[i].mode,
- adapter->debugfs_root, (void *)adapter,
+ adapter->debugfs_root, adapter,
debugfs_files[i].fops);
return 0;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5bff5c2be88b..8d13ea370db1 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1224,7 +1224,8 @@ map_error:
return -ENOMEM;
}
-static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gmac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
unsigned short m = (1 << port->txq_order) - 1;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f16853c3c851..0ccd9994ad45 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -951,7 +951,7 @@ static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 si
static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
static int test_tp(struct net_device *dev, s32 msec);
static int EISA_signature(char *name, struct device *device);
-static int PCI_signature(char *name, struct de4x5_private *lp);
+static void PCI_signature(char *name, struct de4x5_private *lp);
static void DevicePresent(struct net_device *dev, u_long iobase);
static void enet_addr_rst(u_long aprom_addr);
static int de4x5_bad_srom(struct de4x5_private *lp);
@@ -3902,14 +3902,14 @@ EISA_signature(char *name, struct device *device)
/*
** Look for a particular board name in the PCI configuration space
*/
-static int
+static void
PCI_signature(char *name, struct de4x5_private *lp)
{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
+ int i, siglen = ARRAY_SIZE(de4x5_signatures);
if (lp->chipset == DC21040) {
strcpy(name, "DE434/5");
- return status;
+ return;
} else { /* Search for a DEC name in the SROM */
int tmp = *((char *)&lp->srom + 19) * 3;
strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
@@ -3935,8 +3935,6 @@ PCI_signature(char *name, struct de4x5_private *lp)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
lp->useSROM = true;
}
-
- return status;
}
/*
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 11accab81ea1..0f3e842a4fd6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -244,6 +244,35 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
ch->xdp.drop_cnt = 0;
}
+static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_eth_xdp_fds *xdp_fds)
+{
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ int num_fds, err, max_retries;
+ struct dpaa2_fd *fds;
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* try to enqueue all the FDs until the max number of retries is hit */
+ fds = xdp_fds->fds;
+ num_fds = xdp_fds->num;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued],
+ 0, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ percpu_extras->tx_portal_busy += ++retries;
+ continue;
+ }
+ total_enqueued += enqueued;
+ }
+ xdp_fds->num = 0;
+
+ return total_enqueued;
+}
+
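The helper above centralizes the enqueue loop formerly open-coded in dpaa2_eth_xdp_xmit: keep submitting the remaining frame descriptors, count a retry only when the portal reports busy, and stop after num_fds * DPAA2_ETH_ENQUEUE_RETRIES attempts. A stubbed sketch of that control flow, with an illustrative retry budget:

#include <stdio.h>

#define RETRIES 10	/* illustrative budget per frame */

static int enqueue_stub(int want, int *done)
{
	static int busy_once = 1;

	if (busy_once--)
		return -1;	/* simulate one busy portal */
	*done = want;		/* then accept everything */
	return 0;
}

int main(void)
{
	int num_fds = 4, total = 0, retries = 0, done;

	while (total < num_fds && retries < num_fds * RETRIES) {
		if (enqueue_stub(num_fds - total, &done)) {
			retries++;
			continue;
		}
		total += done;
	}
	printf("enqueued %d of %d after %d retries\n",
	       total, num_fds, retries);
	return 0;
}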
static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
void *buf_start, u16 queue_id)
{
@@ -1934,12 +1963,11 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int total_enqueued = 0, retries = 0, enqueued;
- struct dpaa2_eth_drv_stats *percpu_extras;
+ struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
struct rtnl_link_stats64 *percpu_stats;
- int num_fds, i, err, max_retries;
struct dpaa2_eth_fq *fq;
struct dpaa2_fd *fds;
+ int enqueued, i, err;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -1948,10 +1976,10 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
return -ENETDOWN;
fq = &priv->fq[smp_processor_id()];
- fds = fq->xdp_fds;
+ xdp_redirect_fds = &fq->xdp_redirect_fds;
+ fds = xdp_redirect_fds->fds;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
/* create a FD for each xdp_frame in the list received */
for (i = 0; i < n; i++) {
@@ -1959,28 +1987,19 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
if (err)
break;
}
- num_fds = i;
+ xdp_redirect_fds->num = i;
- /* try to enqueue all the FDs until the max number of retries is hit */
- max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
- while (total_enqueued < num_fds && retries < max_retries) {
- err = priv->enqueue(priv, fq, &fds[total_enqueued],
- 0, num_fds - total_enqueued, &enqueued);
- if (err == -EBUSY) {
- percpu_extras->tx_portal_busy += ++retries;
- continue;
- }
- total_enqueued += enqueued;
- }
+ /* enqueue all the frame descriptors */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
/* update statistics */
- percpu_stats->tx_packets += total_enqueued;
- for (i = 0; i < total_enqueued; i++)
+ percpu_stats->tx_packets += enqueued;
+ for (i = 0; i < enqueued; i++)
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
- for (i = total_enqueued; i < n; i++)
+ for (i = enqueued; i < n; i++)
xdp_return_frame_rx_napi(frames[i]);
- return total_enqueued;
+ return enqueued;
}
static int update_xps(struct dpaa2_eth_priv *priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 43cd8409f2e9..b5f7dbbc2a02 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -310,6 +310,11 @@ enum dpaa2_eth_fq_type {
struct dpaa2_eth_priv;
+struct dpaa2_eth_xdp_fds {
+ struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
+ ssize_t num;
+};
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -328,7 +333,7 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
- struct dpaa2_fd xdp_fds[DEV_MAP_BULK_SIZE];
+ struct dpaa2_eth_xdp_fds xdp_redirect_fds;
};
struct dpaa2_eth_ch_xdp {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index ebc635f8a4cc..15f37c5b8dc1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -74,8 +74,8 @@ err_pci_mem_reg:
pci_disable_device(pdev);
err_pci_enable:
err_mdiobus_alloc:
- iounmap(port_regs);
err_hw_alloc:
+ iounmap(port_regs);
err_ioremap:
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 48e589e9d0f7..fd3df19eaa32 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -424,7 +424,7 @@ struct enetc_psfp {
spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};
-struct actions_fwd enetc_act_fwd[] = {
+static struct actions_fwd enetc_act_fwd[] = {
{
BIT(FLOW_ACTION_GATE),
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
@@ -903,7 +903,7 @@ static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
enetc_streamfilter_hw_set(priv, sfi, false);
hlist_del(&sfi->node);
kfree(sfi);
- clear_bit(sfi->index, epsfp.psfp_sfi_bitmap);
+ clear_bit(index, epsfp.psfp_sfi_bitmap);
}
}
@@ -963,7 +963,8 @@ revert_sid:
return err;
}
-struct actions_fwd *enetc_check_flow_actions(u64 acts, unsigned int inputkeys)
+static struct actions_fwd *enetc_check_flow_actions(u64 acts,
+ unsigned int inputkeys)
{
int i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 21a736174fda..1ffe8fac702d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -24,7 +24,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
- HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate respone to VF */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 5602bf226687..7506cabaa16e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -552,6 +552,7 @@ struct hnae3_ae_ops {
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
u32 len, u8 *data);
+ bool (*get_cmdq_stat)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index c79d6a391105..9fe40c7773b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1445,9 +1445,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
- if (!skb_has_frag_list(skb))
- goto out;
-
skb_walk_frags(skb, frag_skb) {
ret = hns3_fill_skb_to_desc(ring, frag_skb,
DESC_TYPE_FRAGLIST_SKB);
@@ -1456,7 +1453,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
}
-out:
+
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 240ba06cd0eb..60f82ad89957 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -46,23 +46,6 @@ enum hns3_nic_state {
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_EN_REG 0x00090
-#define HNS3_RING_T0_BE_RST 0x00094
-#define HNS3_RING_COULD_BE_RST 0x00098
-#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
-
-#define HNS3_RING_INTMSK_RXWL_REG 0x000A0
-#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4
-#define HNS3_RX_RING_INT_STS_REG 0x000A8
-#define HNS3_RING_INTMSK_TXWL_REG 0x000AC
-#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0
-#define HNS3_TX_RING_INT_STS_REG 0x000B4
-#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8
-#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC
-#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4
-#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8
-
-#define HNS3_RING_MB_CTRL_REG 0x00100
-#define HNS3_RING_MB_DATA_BASE_REG 0x00200
#define HNS3_TX_REG_OFFSET 0x40
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 1a105f2f87a4..6b1545f982aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -773,8 +773,13 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
/* Only support ksettings_set for netdev with phy attached for now */
- if (netdev->phydev)
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ }
if (handle->pdev->revision == 0x20)
return -EOPNOTSUPP;
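The added guard reflects that 1000BASE-T link-up depends on autonegotiation for master/slave resolution, so a forced 1 Gb/s setting cannot work and is rejected before it reaches the PHY layer. A minimal sketch of the check; the SPEED_/AUTONEG_ values match the uapi definitions:

#include <stdio.h>

#define SPEED_1000      1000
#define AUTONEG_DISABLE 0
#define AUTONEG_ENABLE  1

static int validate(int speed, int autoneg)
{
	/* 1000BASE-T master/slave resolution needs autonegotiation */
	if (speed == SPEED_1000 && autoneg == AUTONEG_DISABLE)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", validate(SPEED_1000, AUTONEG_DISABLE),
	       validate(SPEED_1000, AUTONEG_ENABLE));	/* -1 0 */
	return 0;
}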
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 9a9d752aedc5..e3bab8f3847f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -184,11 +184,11 @@ enum hclge_opcode_type {
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 6cfa8253eefc..26f6f068b01d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -691,7 +691,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int queue_id, group_id;
- u32 qset_maping[32];
+ u32 qset_mapping[32];
int tc_id, qset_id;
int pri_id, ret;
u32 i;
@@ -746,7 +746,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] =
+ qset_mapping[group_id] =
le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
@@ -756,11 +756,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
for (group_id = 0; group_id < 4; group_id++) {
dev_info(&hdev->pdev->dev,
"%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_maping[(u32)(i + 7)],
- qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
- qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
- qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
- qset_maping[i]);
+ group_id * 256, qset_mapping[(u32)(i + 7)],
+ qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
+ qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
+ qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
+ qset_mapping[i]);
i += 8;
}
@@ -1258,6 +1258,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_PARAM_NUM 2
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1267,13 +1268,17 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
int ret;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
- if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
- length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
- dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
+ if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Too few parameters, num = %d.\n", ret);
return;
}
- if (offset < 0 || length <= 0) {
- dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
+
+ if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev,
+ "Invalid input, offset = %d, length = %d.\n",
+ offset, length);
return;
}
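The rework above folds both failure modes into two explicit checks: exactly two hex parameters must parse, and offset/length must stay inside the 4096-byte NCL window. The same validation as a user-space sketch:

#include <stdio.h>

#define MAX_OFFSET 4096

static int parse(const char *buf)
{
	int offset, length;

	if (sscanf(buf, "%x %x", &offset, &length) != 2)
		return -1;	/* too few parameters */
	if (offset < 0 || offset >= MAX_OFFSET ||
	    length <= 0 || length > MAX_OFFSET - offset)
		return -2;	/* out of range */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", parse("10 20"),
	       parse("10"), parse("ff0 20"));	/* 0 -1 -2 */
	return 0;
}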
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 876fd81ad2f1..608fe26fc3fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -16,7 +16,6 @@
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
-#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 71a54ddb51f5..b796d3fb5b0b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -552,7 +552,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -572,7 +572,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATUS,
+ HCLGE_OPC_QUERY_TX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -1363,10 +1363,8 @@ static int hclge_configure(struct hclge_dev *hdev)
int ret;
ret = hclge_get_cfg(hdev, &cfg);
- if (ret) {
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
+ if (ret)
return ret;
- }
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
@@ -2968,13 +2966,11 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
- msix_src_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2984,7 +2980,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*
* check for vector0 reset event sources
*/
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -2993,7 +2989,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
@@ -3483,7 +3479,7 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ HCLGE_MISC_VECTOR_INT_STS);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
@@ -6404,6 +6400,14 @@ static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+}
+
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -11313,6 +11317,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
.get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index ac70fafd15d5..0874ae47cb03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -742,7 +742,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret = hclge_get_link_info(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get link stat for VF\n",
+ "failed to inform link stat to VF, ret = %d\n",
ret);
break;
case HCLGE_MBX_QUEUE_RESET:
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index a621ebbf7610..48b40be3e84d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -69,6 +69,8 @@ struct hinic_dev {
struct hinic_txq *txqs;
struct hinic_rxq *rxqs;
+ u16 sq_depth;
+ u16 rq_depth;
struct hinic_txq_stats tx_stats;
struct hinic_rxq_stats rx_stats;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 966aea949c0b..ace18d258049 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -33,6 +33,99 @@
#include "hinic_rx.h"
#include "hinic_dev.h"
+#define SET_LINK_STR_MAX_LEN 128
+
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= SUPPORTED_##mode)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= ADVERTISED_##mode)
+
+struct hw2ethtool_link_mode {
+ enum ethtool_link_mode_bit_indices link_mode_bit;
+ u32 speed;
+ enum hinic_link_mode hw_link_mode;
+};
+
+struct cmd_link_settings {
+ u64 supported;
+ u64 advertising;
+
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ u8 autoneg;
+};
+
+static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
+ SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000
+};
+
+static struct hw2ethtool_link_mode
+ hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
+ .hw_link_mode = HINIC_10GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
+ .hw_link_mode = HINIC_GE_BASE_KX,
+ },
+};
+
static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
enum hinic_speed speed)
{
@@ -71,18 +164,91 @@ static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
}
}
+static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
+{
+ int i = 0;
+
+ for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
+ if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
+ break;
+ }
+
+ return i;
+}
+
+static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
+ enum hinic_link_mode hw_link_mode,
+ u32 name)
+{
+ enum hinic_link_mode link_mode;
+ int idx = 0;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (hw_link_mode & ((u32)1 << link_mode)) {
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (name == GET_SUPPORTED_MODE)
+ ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
+ (link_settings, idx);
+ else
+ ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
+ (link_settings, idx);
+ }
+ }
+}
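hinic_add_ethtool_link_mode() treats the firmware supported/advertised words as bitmaps indexed by enum hinic_link_mode and funnels each set bit through the table above. A standalone sketch of the decode loop (plain C, hypothetical bit values):

#include <stdio.h>

int main(void)
{
        /* bit 0 = HINIC_10GE_BASE_KR, bit 7 = HINIC_25GE_BASE_KR */
        unsigned int supported = (1u << 0) | (1u << 7);
        int mode;

        for (mode = 0; mode < 10; mode++)
                if (supported & (1u << mode))
                        printf("hw link mode %d maps to an ethtool bit\n", mode);
        return 0;
}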
+
+static void hinic_link_port_type(struct cmd_link_settings *link_settings,
+ enum hinic_port_type port_type)
+{
+ switch (port_type) {
+ case HINIC_PORT_ELEC:
+ case HINIC_PORT_TP:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
+ link_settings->port = PORT_TP;
+ break;
+
+ case HINIC_PORT_AOC:
+ case HINIC_PORT_FIBRE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_FIBRE;
+ break;
+
+ case HINIC_PORT_COPPER:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_DA;
+ break;
+
+ case HINIC_PORT_BACKPLANE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
+ link_settings->port = PORT_NONE;
+ break;
+
+ default:
+ link_settings->port = PORT_OTHER;
+ break;
+ }
+}
+
static int hinic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings
*link_ksettings)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct hinic_pause_config pause_info = { 0 };
+ struct cmd_link_settings settings = { 0 };
enum hinic_port_link_state link_state;
struct hinic_port_cap port_cap;
int err;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
- Autoneg);
link_ksettings->base.speed = SPEED_UNKNOWN;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
@@ -92,14 +258,19 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (err)
return err;
+ hinic_link_port_type(&settings, port_cap.port_type);
+ link_ksettings->base.port = settings.port;
+
err = hinic_port_link_state(nic_dev, &link_state);
if (err)
return err;
- if (link_state != HINIC_LINK_STATE_UP)
- return err;
-
- set_link_speed(link_ksettings, port_cap.speed);
+ if (link_state == HINIC_LINK_STATE_UP) {
+ set_link_speed(link_ksettings, port_cap.speed);
+ link_ksettings->base.duplex =
+ (port_cap.duplex == HINIC_DUPLEX_FULL) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -108,11 +279,243 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
link_ksettings->base.autoneg = AUTONEG_ENABLE;
- link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return -EIO;
+
+ hinic_add_ethtool_link_mode(&settings, link_mode.supported,
+ GET_SUPPORTED_MODE);
+ hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
+ GET_ADVERTISED_MODE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (err)
+ return err;
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
+ if (pause_info.rx_pause && pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ } else if (pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ } else if (pause_info.rx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ }
+ }
+
+ bitmap_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return 0;
+}
+
+static int hinic_ethtool_to_hw_speed_level(u32 speed)
+{
+ int i;
+
+ for (i = 0; i < LINK_SPEED_LEVELS; i++) {
+ if (hw_to_ethtool_speed[i] == speed)
+ break;
+ }
+
+ return i;
+}
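hinic_ethtool_to_hw_speed_level() intentionally returns the array size when the speed is not in the table, so callers must treat a return of LINK_SPEED_LEVELS as "unsupported" (hinic_is_speed_legal() below does exactly that). A runnable illustration of the sentinel pattern:

#include <stdio.h>

static const unsigned int speeds[] = {
        10, 100, 1000, 10000, 25000, 40000, 100000
};
#define LEVELS (sizeof(speeds) / sizeof(speeds[0]))

static unsigned int speed_level(unsigned int speed)
{
        unsigned int i;

        for (i = 0; i < LEVELS; i++)
                if (speeds[i] == speed)
                        break;
        return i;       /* == LEVELS when the speed is unsupported */
}

int main(void)
{
        /* prints "25000 -> 4, 2500 -> 7" (7 == LEVELS, i.e. unsupported) */
        printf("25000 -> %u, 2500 -> %u\n",
               speed_level(25000), speed_level(2500));
        return 0;
}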
+
+static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
+ u32 speed)
+{
+ enum hinic_link_mode link_mode;
+ int idx;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (!(supported_link & ((u32)1 << link_mode)))
+ continue;
+
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (hw_to_ethtool_link_mode_table[idx].speed == speed)
+ return true;
+ }
+
+ return false;
+}
+
+static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
+{
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err)
+ return false;
+
+ if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return false;
+
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ if (speed_level >= LINK_SPEED_LEVELS ||
+ !hinic_is_support_speed(link_mode.supported, speed)) {
+ netif_err(nic_dev, drv, netdev,
+ "Unsupported speed: %d\n", speed);
+ return false;
+ }
+
+ return true;
+}
+
+static int get_link_settings_type(struct hinic_dev *nic_dev,
+ u8 autoneg, u32 speed, u32 *set_settings)
+{
+ struct hinic_port_cap port_cap = { 0 };
+ int err;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+ /* always pass the autoneg setting when the port supports it */
+ if (port_cap.autoneg_cap)
+ *set_settings |= HILINK_LINK_SET_AUTONEG;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (!port_cap.autoneg_cap) {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (speed != (u32)SPEED_UNKNOWN) {
+ /* set speed only when autoneg is disabled */
+ if (!hinic_is_speed_legal(nic_dev, speed))
+ return -EINVAL;
+ *set_settings |= HILINK_LINK_SET_SPEED;
+ } else {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Speed must be set when autoneg is off\n");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
+static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg,
+ u32 speed)
+{
+ enum nic_speed_level speed_level = 0;
+ int err = 0;
+
+ if (set_settings & HILINK_LINK_SET_AUTONEG) {
+ err = hinic_set_autoneg(nic_dev->hwdev,
+ (autoneg == AUTONEG_ENABLE));
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Failed to %s autoneg\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "enable" : "disable");
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "Autoneg %s successfully\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "enabled" : "disabled");
+ }
+
+ if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = hinic_set_speed(nic_dev->hwdev, speed_level);
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
+ speed);
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
+ speed);
+ }
+
+ return err;
+}
+
+static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg, u32 speed)
+{
+ struct hinic_link_ksettings_info settings = {0};
+ char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s",
+ (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autong enable " : "autong disable ") : "");
+ if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+
+ if (set_settings & HILINK_LINK_SET_SPEED) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
+ "%sspeed %d ", set_link_str, speed);
+ if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ }
+
+ settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ settings.valid_bitmap = set_settings;
+ settings.autoneg = autoneg;
+ settings.speed = speed_level;
+
+ err = hinic_set_link_settings(nic_dev->hwdev, &settings);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Set %s failed\n",
+ set_link_str);
+ else
+ netif_info(nic_dev, drv, netdev, "Set %s successfully\n",
+ set_link_str);
+
+ return err;
+ }
+
+ return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
+ speed);
+}
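One caveat in hinic_set_settings_to_hw() above: the second snprintf() passes set_link_str both as the destination and as a "%s" source argument, and the C standard leaves copying between overlapping objects undefined. A safer append pattern tracks a running offset (standalone sketch, not a proposed driver change):

#include <stdio.h>

int main(void)
{
        char buf[128];
        int off = 0;

        off += snprintf(buf + off, sizeof(buf) - off, "autoneg enable ");
        off += snprintf(buf + off, sizeof(buf) - off, "speed %d ", 25000);
        printf("%s\n", buf);    /* "autoneg enable speed 25000 " */
        return 0;
}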
+
+static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u32 set_settings = 0;
+ int err;
+
+ err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
+ if (err)
+ return err;
+
+ if (set_settings)
+ err = hinic_set_settings_to_hw(nic_dev, set_settings,
+ autoneg, speed);
+ else
+ netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
+
+ return err;
+}
+
+static int hinic_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ /* only autoneg and speed can be set */
+ return set_link_settings(netdev, link_settings->base.autoneg,
+ link_settings->base.speed);
+}
+
static void hinic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -135,12 +538,81 @@ static void hinic_get_drvinfo(struct net_device *netdev,
static void hinic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- ring->rx_max_pending = HINIC_RQ_DEPTH;
- ring->tx_max_pending = HINIC_SQ_DEPTH;
- ring->rx_pending = HINIC_RQ_DEPTH;
- ring->tx_pending = HINIC_SQ_DEPTH;
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->rx_pending = nic_dev->rq_depth;
+ ring->tx_pending = nic_dev->sq_depth;
}
+static int check_ringparam_valid(struct hinic_dev *nic_dev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported rx_jumbo_pending/rx_mini_pending\n");
+ return -EINVAL;
+ }
+
+ if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
+ ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Queue depth out of range [%d-%d]\n",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 new_sq_depth, new_rq_depth;
+ int err;
+
+ err = check_ringparam_valid(nic_dev, ring);
+ if (err)
+ return err;
+
+ new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
+ new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
+
+ if (new_sq_depth == nic_dev->sq_depth &&
+ new_rq_depth == nic_dev->rq_depth)
+ return 0;
+
+ netif_info(nic_dev, drv, netdev,
+ "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ new_sq_depth, new_rq_depth);
+
+ nic_dev->sq_depth = new_sq_depth;
+ nic_dev->rq_depth = new_rq_depth;
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ err = hinic_close(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
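hinic_set_ringparam() rounds the requested depths down to a power of two: 1U << ilog2(x) keeps only the highest set bit. A runnable check of the rule, with ilog2 open-coded since it is a kernel macro:

#include <stdio.h>

static unsigned int round_down_pow2(unsigned int x)
{
        unsigned int p = 1;

        while ((p << 1) <= x)
                p <<= 1;
        return p;
}

int main(void)
{
        /* a request for 3000 descriptors becomes a depth of 2048 */
        printf("%u -> %u\n", 3000, round_down_pow2(3000));
        return 0;
}

From userspace this path is driven by, e.g., ethtool -G <ifname> rx 2048 tx 2048, bounded by HINIC_MIN_QUEUE_DEPTH..HINIC_MAX_QUEUE_DEPTH (128..4096).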
static void hinic_get_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
@@ -741,9 +1213,11 @@ static void hinic_get_strings(struct net_device *netdev,
static const struct ethtool_ops hinic_ethtool_ops = {
.get_link_ksettings = hinic_get_link_ksettings,
+ .set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
+ .set_ringparam = hinic_set_ringparam,
.get_channels = hinic_get_channels,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 33c5333657c1..cb5b6e5f787f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -849,6 +849,25 @@ err_init_cmdq:
return err;
}
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+
+ hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
+ hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_HWCTXT_SET,
+ &hw_ioctxt, sizeof(hw_ioctxt), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
/**
* hinic_init_cmdqs - init all cmdqs
* @cmdqs: cmdqs to init
@@ -899,8 +918,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
cmdq_ceq_handler);
+
+ err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
+ goto err_set_cmdq_depth;
+ }
+
return 0;
+err_set_cmdq_depth:
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
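hinic_set_cmdq_depth() sends the depth log2-encoded in a single byte, which is exact for a power-of-two depth. Worked example (a depth of 4096 is assumed for illustration; the real CMDQ_DEPTH constant is defined elsewhere in the driver):

#include <stdio.h>

int main(void)
{
        unsigned int depth = 4096, log = 0;     /* hypothetical depth */

        while (depth >> (log + 1))
                log++;
        printf("cmdq_depth byte = %u\n", log);  /* prints 12 */
        return 0;
}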
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index e5cab58e4ddd..0245da02efbb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -44,10 +44,6 @@ enum io_status {
IO_RUNNING = 1,
};
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
/**
* get_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -223,6 +219,19 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
HINIC_MGMT_MSG_SYNC);
}
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd,
+ buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+}
+
/**
* init_fw_ctxt- Init Firmware tables before network mgmt and io operations
* @hwdev: the NIC HW device
@@ -260,8 +269,8 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
*
* Return 0 - Success, negative - Failure
**/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
+static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,
+ unsigned int rq_depth)
{
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_cmd_hw_ioctxt hw_ioctxt;
@@ -426,7 +435,7 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
*
* Return 0 - Success, negative - Failure
**/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
{
struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
struct hinic_cap *nic_cap = &hwdev->nic_cap;
@@ -449,6 +458,9 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
func_to_io->hwdev = hwdev;
+ func_to_io->sq_depth = sq_depth;
+ func_to_io->rq_depth = rq_depth;
+
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
if (err) {
@@ -473,7 +485,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
hinic_db_state_set(hwif, HINIC_DB_ENABLE);
}
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
+ err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
if (err) {
dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
goto err_hw_ioctxt;
@@ -667,6 +679,32 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_l2nic_reset l2nic_reset = {0};
+ u16 out_size = sizeof(l2nic_reset);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ /* 0 represents standard l2nic reset flow */
+ l2nic_reset.reset_flag = 0;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || l2nic_reset.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
@@ -729,6 +767,10 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
goto err_init_pfhwdev;
}
+ err = hinic_l2nic_reset(hwdev);
+ if (err)
+ goto err_l2nic_reset;
+
err = get_dev_cap(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to get device capabilities\n");
@@ -759,6 +801,7 @@ err_resources_state:
err_init_fw_ctxt:
hinic_vf_func_free(hwdev);
err_vf_func_init:
+err_l2nic_reset:
err_dev_cap:
free_pfhwdev(pfhwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 531d1072e0df..71ea7e46dbbc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -25,6 +25,7 @@
#define HINIC_PF_SET_VF_ALREADY 0x4
#define HINIC_MGMT_STATUS_EXIST 0x6
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
struct hinic_cap {
u16 max_qps;
@@ -33,6 +34,11 @@ struct hinic_cap {
u16 max_vf_qps;
};
+enum hw_ioctxt_set_cmdq_depth {
+ HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
+ HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE,
+};
+
enum hinic_port_cmd {
HINIC_PORT_CMD_VF_REGISTER = 0x0,
HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
@@ -48,6 +54,9 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_SET_RX_MODE = 12,
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 20,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 21,
+
HINIC_PORT_CMD_GET_LINK_STATE = 24,
HINIC_PORT_CMD_SET_LRO = 25,
@@ -86,12 +95,16 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
+
HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
HINIC_PORT_CMD_SET_FUNC_STATE = 93,
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_SET_VF_RATE = 105,
+
HINIC_PORT_CMD_SET_VF_VLAN = 106,
HINIC_PORT_CMD_CLR_VF_VLAN,
@@ -106,7 +119,21 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_CAP = 170,
+ HINIC_PORT_CMD_GET_LINK_MODE = 217,
+
+ HINIC_PORT_CMD_SET_SPEED = 218,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 219,
+
HINIC_PORT_CMD_SET_LRO_TIMER = 244,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+};
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
};
enum hinic_ucode_cmd {
@@ -247,6 +274,15 @@ struct hinic_cmd_hw_ci {
u64 ci_addr;
};
+struct hinic_cmd_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
struct hinic_hwdev {
struct hinic_hwif *hwif;
struct msix_entry *msix_entries;
@@ -307,7 +343,11 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size);
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 3fbd2eb80582..cf127d896ba6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
@@ -18,6 +19,8 @@
#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
/**
* hinic_msix_attr_set - set message attribute for msix entry
* @hwif: the HW interface of a pci function device
@@ -187,20 +190,39 @@ void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
**/
static int hwif_ready(struct hinic_hwif *hwif)
{
- struct pci_dev *pdev = hwif->pdev;
u32 addr, attr1;
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
+ if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ if (HINIC_IS_VF(hwif)) {
+ if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
}
return 0;
}
+static int wait_hwif_ready(struct hinic_hwif *hwif)
+{
+ unsigned long timeout = 0;
+
+ do {
+ if (!hwif_ready(hwif))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ dev_err(&hwif->pdev->dev, "Wait for hwif timeout\n");
+
+ return -EBUSY;
+}
+
/**
* set_hwif_attr - set the attributes in the relevant members in hwif
* @hwif: the HW interface of a pci function device
@@ -373,7 +395,7 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
goto err_map_intr_bar;
}
- err = hwif_ready(hwif);
+ err = wait_hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
goto err_hwif_ready;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index 53bb89c1dd26..0872e035faa1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -55,13 +55,15 @@
#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_PF_INIT_STATUS_SHIFT 31
#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_PF_INIT_STATUS_MASK 0x1
#define HINIC_FA1_GET(val, member) \
(((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
@@ -190,7 +192,7 @@ enum hinic_mod_type {
HINIC_MOD_COMM = 0, /* HW communication module */
HINIC_MOD_L2NIC = 1, /* L2NIC module */
HINIC_MOD_CFGM = 7, /* Configuration module */
-
+ HINIC_MOD_HILINK = 14, /* Hilink module */
HINIC_MOD_MAX = 15
};
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index a4581c988a63..3e3fa742e476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -282,7 +282,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
+ func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
return err;
@@ -290,7 +290,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
+ func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
goto err_rq_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index 28c0594f636d..214f162f7579 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -60,6 +60,9 @@ struct hinic_func_to_io {
struct hinic_qp *qps;
u16 max_qps;
+ u16 sq_depth;
+ u16 rq_depth;
+
void __iomem **sq_db;
void __iomem *db_base;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 564fb2294a29..bc2f87e6cb5d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -627,7 +627,7 @@ wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
struct hinic_hwdev *hwdev = func_to_func->hwdev;
struct completion *done = &send_mbox->send_done;
u32 cnt = 0;
- ulong jif;
+ unsigned long jif;
if (poll) {
while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
@@ -869,7 +869,7 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
{
struct hinic_recv_mbox *mbox_for_resp;
struct mbox_msg_info msg_info = {0};
- ulong timeo;
+ unsigned long timeo;
int err;
mbox_for_resp = &func_to_func->mbox_resp[dst_func];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 20c5c8ea452e..fcf7bfe4aa47 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -643,6 +643,7 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
/* increment prod_idx to the next */
prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ prod_idx = SQ_MASKED_IDX(sq, prod_idx);
wmb(); /* Write all before the doorbell */
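The added line keeps the software producer index inside the ring after it is advanced. SQ_MASKED_IDX is not shown in this hunk; assuming the usual idx & (depth - 1) masking for a power-of-two depth, the wraparound behaves like this (standalone sketch):

#include <stdio.h>

int main(void)
{
        unsigned int depth = 4096;              /* power-of-two queue depth */
        unsigned int prod = 4095 + 2;           /* advanced past the end */

        printf("%u\n", prod & (depth - 1));     /* prints 1: wrapped in place */
        return 0;
}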
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index c30d092e48d5..ca3e2d060284 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -44,6 +44,9 @@
#define HINIC_SQ_DEPTH SZ_4K
#define HINIC_RQ_DEPTH SZ_4K
+#define HINIC_MAX_QUEUE_DEPTH SZ_4K
+#define HINIC_MIN_QUEUE_DEPTH 128
+
/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
#define HINIC_RX_BUF_SZ 2048
#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index b66bb86cff96..e3ff119fe341 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -372,14 +372,15 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}
-static int hinic_open(struct net_device *netdev)
+int hinic_open(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
enum hinic_port_link_state link_state;
int err, ret;
if (!(nic_dev->flags & HINIC_INTF_UP)) {
- err = hinic_hwdev_ifup(nic_dev->hwdev);
+ err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
+ nic_dev->rq_depth);
if (err) {
netif_err(nic_dev, drv, netdev,
"Failed - HW interface up\n");
@@ -427,10 +428,6 @@ static int hinic_open(struct net_device *netdev)
goto err_func_port_state;
}
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- /* Wait up to 3 sec between port enable to link state */
- msleep(3000);
-
down(&nic_dev->mgmt_lock);
err = hinic_port_link_state(nic_dev, &link_state);
@@ -487,7 +484,7 @@ err_create_txqs:
return err;
}
-static int hinic_close(struct net_device *netdev)
+int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
@@ -766,10 +763,12 @@ static void hinic_set_rx_mode(struct net_device *netdev)
HINIC_RX_MODE_MC |
HINIC_RX_MODE_BC;
- if (netdev->flags & IFF_PROMISC)
- rx_mode |= HINIC_RX_MODE_PROMISC;
- else if (netdev->flags & IFF_ALLMULTI)
+ if (netdev->flags & IFF_PROMISC) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ rx_mode |= HINIC_RX_MODE_PROMISC;
+ } else if (netdev->flags & IFF_ALLMULTI) {
rx_mode |= HINIC_RX_MODE_MC_ALL;
+ }
rx_mode_work->rx_mode = rx_mode;
@@ -868,6 +867,9 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
.ndo_get_vf_config = hinic_ndo_get_vf_config,
.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
+ .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
};
static const struct net_device_ops hinicvf_netdev_ops = {
@@ -1037,6 +1039,8 @@ static int nic_dev_init(struct pci_dev *pdev)
nic_dev->rxqs = NULL;
nic_dev->tx_weight = tx_weight;
nic_dev->rx_weight = rx_weight;
+ nic_dev->sq_depth = HINIC_SQ_DEPTH;
+ nic_dev->rq_depth = HINIC_RQ_DEPTH;
nic_dev->sriov_info.hwdev = hwdev;
nic_dev->sriov_info.pdev = pdev;
@@ -1232,6 +1236,8 @@ static void hinic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+
hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index b7fe0adcc29a..175c0ee00038 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -66,15 +66,15 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
return -EFAULT;
}
- if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
- HINIC_PF_SET_VF_ALREADY) {
- dev_warn(&pdev->dev, "PF has already set VF mac, Ignore set operation\n");
+ if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&pdev->dev, "PF has already set VF mac, ignore %s operation\n",
+ (op == MAC_SET) ? "set" : "del");
return HINIC_PF_SET_VF_ALREADY;
}
if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
HINIC_MGMT_STATUS_EXIST)
- dev_warn(&pdev->dev, "MAC is repeated. Ignore set operation\n");
+ dev_warn(&pdev->dev, "MAC is repeated, ignore set operation\n");
return 0;
}
@@ -473,7 +473,7 @@ int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
rq_num.num_rqs = num_rqs;
- rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
+ rq_num.rq_depth = ilog2(nic_dev->rq_depth);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
&rq_num, sizeof(rq_num),
@@ -1072,3 +1072,132 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
return 0;
}
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode)
+{
+ u16 out_size;
+ int err;
+
+ if (!hwdev || !link_mode)
+ return -EINVAL;
+
+ out_size = sizeof(*link_mode);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ link_mode, sizeof(*link_mode),
+ link_mode, &out_size);
+ if (err || !out_size || link_mode->status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable)
+{
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ autoneg.enable = enable;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ speed_info.speed = speed;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info)
+{
+ u16 out_size = sizeof(*info);
+ int err;
+
+ err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ info, sizeof(*info), info, &out_size);
+ if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info->status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info->status, out_size);
+ return -EFAULT;
+ }
+
+ return info->status;
+}
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
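Every mgmt command added above repeats the same "err || !out_size || status" validation. A hypothetical helper capturing the idiom (kernel-style sketch, not part of the patch):

/* Hypothetical consolidation of the repeated validation pattern. */
static int hinic_mgmt_cmd_status(struct hinic_hwdev *hwdev, const char *what,
                                 int err, u16 out_size, u8 status)
{
        if (err || !out_size || status) {
                dev_err(&hwdev->hwif->pdev->dev,
                        "Failed to %s, err: %d, status: 0x%x, out size: 0x%x\n",
                        what, err, status, out_size);
                return -EIO;
        }
        return 0;
}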
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 5ad04fb6722a..661c6322dc15 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -79,6 +79,42 @@ enum hinic_speed {
HINIC_SPEED_UNKNOWN = 0xFF,
};
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
enum hinic_tso_state {
HINIC_TSO_DISABLE = 0,
HINIC_TSO_ENABLE = 1,
@@ -179,6 +215,50 @@ struct hinic_port_cap {
u8 rsvd2[3];
};
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represents invalid value */
+ u16 advertised;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable; /* 1: enable , 0: disable */
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
+
struct hinic_tso_config {
u8 status;
u8 version;
@@ -506,6 +586,61 @@ struct hinic_cmd_vport_stats {
struct hinic_vport_stats stats;
};
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -585,4 +720,24 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info);
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode);
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable);
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed);
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_open(struct net_device *netdev);
+
+int hinic_close(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index fd4aaf43874a..efab2dd2c889 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -22,6 +22,7 @@ MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto,
#define HINIC_VLAN_PRIORITY_SHIFT 13
#define HINIC_ADD_VLAN_IN_MAC 0x8000
+#define HINIC_TX_RATE_TABLE_FULL 12
static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
u16 vlan_id, u16 func_id)
@@ -129,6 +130,84 @@ static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
return 0;
}
+static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
+static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 tx_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if (err || !out_size || rate_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+ dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
{
struct vf_data_storage *vf_info;
@@ -160,6 +239,17 @@ static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
}
}
+ if (vf_info->max_rate) {
+ err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+ vf_info->min_rate);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+ vf_info->min_rate);
+ return err;
+ }
+ }
+
return 0;
}
@@ -700,6 +790,185 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
return err;
}
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
+{
+ u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000};
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_port_cap port_cap = { 0 };
+ enum hinic_port_link_state link_state;
+ int err;
+
+ if (vf >= nic_dev->sriov_info.num_vfs) {
+ netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n",
+ nic_dev->sriov_info.num_vfs);
+ return -EINVAL;
+ }
+
+ if (max_tx_rate < min_tx_rate) {
+ netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
+ max_tx_rate, min_tx_rate);
+ return -EINVAL;
+ }
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Get link status failed when setting vf tx rate\n");
+ return -EIO;
+ }
+
+ if (link_state == HINIC_LINK_STATE_DOWN) {
+ netif_err(nic_dev, drv, netdev,
+ "Link status must be up when setting vf tx rate\n");
+ return -EPERM;
+ }
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err || port_cap.speed > LINK_SPEED_100GB)
+ return -EIO;
+
+ /* rate limit cannot be less than 0 and greater than link speed */
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) {
+ netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n",
+ speeds[port_cap.speed]);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
+ max_tx_rate, min_tx_rate);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Unable to set VF %d max rate %d min rate %d%s\n",
+ vf, max_tx_rate, min_tx_rate,
+ err == HINIC_TX_RATE_TABLE_FULL ?
+ ", tx rate profile is full" : "");
+ return -EIO;
+ }
+
+ netif_info(nic_dev, drv, netdev,
+ "Set VF %d max tx rate %d min tx rate %d successfully\n",
+ vf, max_tx_rate, min_tx_rate);
+
+ return 0;
+}
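The bound check above indexes speeds[] with the port's speed level, so a 25G port (level 4) caps max_tx_rate at 25000 Mb/s. Runnable check (the level value is assumed to follow enum nic_speed_level):

#include <stdio.h>

int main(void)
{
        const unsigned int speeds[] = {
                10, 100, 1000, 10000, 25000, 40000, 100000
        };
        unsigned int port_speed_level = 4;      /* LINK_SPEED_25GB */

        printf("max_tx_rate must be in [0 - %u]\n", speeds[port_speed_level]);
        return 0;
}

From userspace this corresponds to, e.g., ip link set dev <ifname> vf 0 min_tx_rate 1000 max_tx_rate 10000 (iproute2).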
+
+static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool spoofchk)
+{
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vf_infos = hwdev->func_to_io.vf_infos;
+
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg, sizeof(spoofchk_cfg),
+ &spoofchk_cfg, &out_size);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EIO;
+ }
+
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ bool cur_spoofchk;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
+
+ /* same request, so just return success */
+ if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ return 0;
+
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+
+ if (!err) {
+ netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ netif_err(nic_dev, drv, netdev,
+ "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id,
+ int link)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+
+ if (vf_id >= sriov_info->num_vfs) {
+ netif_err(nic_dev, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ return hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
+}
+
/* pf receive message from vf */
static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
u16 in_size, void *buf_out, u16 *out_size)
@@ -801,6 +1070,12 @@ static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id))
hinic_kill_vf_vlan(nic_dev->hwdev, vf_id);
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0);
+
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false);
+
if (vf_infos->trust)
hinic_set_vf_trust(nic_dev->hwdev, vf_id, false);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index 64affc7474b5..ba627a362f9a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -86,6 +86,13 @@ int hinic_ndo_get_vf_config(struct net_device *netdev,
int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
u8 link_status);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4bd33245bad6..3de549c6c693 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2189,7 +2189,8 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- } else {
+ } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+ adapter->from_passive_init)) {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 900affbdcc0e..1645e4e7ebdb 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -276,7 +276,8 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
return pkts;
}
-static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
struct xrx200_chan *ch = &priv->chan_tx;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81d24481b22c..4d4b6243318a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -666,11 +666,6 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
u16 *l4i_chk, u32 *command, int length)
{
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 51889770958d..e0e9e56830c0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3561,6 +3561,10 @@ static void mvneta_start_dev(struct mvneta_port *pp)
MVNETA_CAUSE_LINK_CHANGE);
phylink_start(pp->phylink);
+
+ /* We may have called phy_speed_down before */
+ phy_speed_up(pp->dev->phydev);
+
netif_tx_start_all_queues(pp->dev);
}
@@ -3568,6 +3572,9 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
+ if (device_may_wakeup(&pp->dev->dev))
+ phy_speed_down(pp->dev->phydev, false);
+
phylink_stop(pp->phylink);
if (!pp->neta_armada3700) {
@@ -4040,6 +4047,10 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
phylink_ethtool_get_wol(pp->phylink, &wol);
device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+ /* PHY WoL may be enabled but device wakeup disabled */
+ if (wol.supported)
+ device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
+
return err;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8972cdd559e8..7352244c5e68 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1428,6 +1428,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
struct mvpp2_ethtool_fs *efs;
int ret;
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
efs = port->rfs_rules[info->fs.location];
if (!efs)
return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 1fa60e985b43..2b5dad2ec650 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4329,6 +4329,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
+ if (rss_context >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c..5975521a4c86 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
{
dma_addr_t iova;
+ u8 *buf;
- /* Check if request can be accommodated in previous allocated page */
- if (pool->page && ((pool->page_offset + pool->rbsize) <=
- (PAGE_SIZE << pool->rbpage_order))) {
- pool->pageref++;
- goto ret;
- }
-
- otx2_get_page(pool);
-
- /* Allocate a new page */
- pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- pool->rbpage_order);
- if (unlikely(!pool->page))
+ buf = napi_alloc_frag(pool->rbsize);
+ if (unlikely(!buf))
return -ENOMEM;
- pool->page_offset = 0;
-ret:
- iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
- pool->rbsize, DMA_FROM_DEVICE);
- if (!iova) {
- if (!pool->page_offset)
- __free_pages(pool->page, pool->rbpage_order);
- pool->page = NULL;
+ iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ page_frag_free(buf);
return -ENOMEM;
}
- pool->page_offset += pool->rbsize;
+
return iova;
}
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+ dma_addr_t addr;
+
+ local_bh_disable();
+ addr = __otx2_alloc_rbuf(pfvf, pool);
+ local_bh_enable();
+ return addr;
+}
+
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool);
if (bufptr <= 0) {
/* Schedule a WQ if we fail to free at least half of the
* pointers, else enable NAPI for this RQ.
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
return err;
pool->rbsize = buf_size;
- pool->rbpage_order = get_order(buf_size);
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
- otx2_get_page(pool);
}
return 0;
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
- otx2_get_page(pool);
}
return 0;
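
The allocator rework above drops the driver's hand-rolled page/offset bookkeeping in favor of napi_alloc_frag(), pairing each buffer with dma_map_single_attrs(). Because napi_alloc_frag() draws from a per-CPU cache, the new process-context wrapper brackets the call with local_bh_disable()/local_bh_enable() so a softirq on the same CPU cannot race it. A rough, userspace-runnable analogue of the page-frag idea (illustrative only; the real helper also refcounts the backing page, which is why the old block is simply leaked here):

#include <stdio.h>
#include <stdlib.h>

#define BACKING_SZ 4096

struct frag_cache {
	char *block;
	size_t offset;
};

static void *frag_alloc(struct frag_cache *fc, size_t sz)
{
	if (!fc->block || fc->offset + sz > BACKING_SZ) {
		fc->block = malloc(BACKING_SZ);	/* fresh backing "page" */
		if (!fc->block)
			return NULL;
		fc->offset = 0;
	}
	fc->offset += sz;
	return fc->block + fc->offset - sz;
}

int main(void)
{
	struct frag_cache fc = { 0 };

	for (int i = 0; i < 3; i++)	/* the third call rolls to a new block */
		printf("buf %d at %p\n", i, frag_alloc(&fc, 2048));
	return 0;
}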
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 0b1c653b3449..2fa29889522e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -434,18 +434,6 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
-/* Update page ref count */
-static inline void otx2_get_page(struct otx2_pool *pool)
-{
- if (!pool->page)
- return;
-
- if (pool->pageref)
- page_ref_add(pool->page, pool->pageref);
- pool->pageref = 0;
- pool->page = NULL;
-}
-
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
if (type == AURA_NIX_SQ)
@@ -589,8 +577,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp);
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 45abe0cd0e7b..b04f5429d72d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -286,7 +286,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
if (unlikely(bufptr <= 0)) {
struct refill_work *work;
struct delayed_work *dwork;
@@ -304,7 +304,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
- otx2_get_page(cq->rbpool);
return processed_cqe;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 4ab32d3adb78..da97f2d4416f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -113,11 +113,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
- u8 rbpage_order;
u16 rbsize;
- u32 page_offset;
- u16 pageref;
- struct page *page;
};
struct otx2_cq_queue {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 09047109d0da..f6a1f8666f95 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -65,7 +65,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
-u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
u32 val;
@@ -1122,7 +1122,7 @@ static void mtk_stop_queue(struct mtk_eth *eth)
}
}
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8a5ea2543670..b816154bc79a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1235,7 +1235,6 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
u32 i, rss_rings;
- int err = 0;
rss_rings = priv->prof->rss_rings ?: n;
rss_rings = rounddown_pow_of_two(rss_rings);
@@ -1249,7 +1248,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc)
*hfunc = priv->rss_hash_fn;
- return err;
+ return 0;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
@@ -1393,7 +1392,6 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
struct mlx4_spec_list *spec_l2,
unsigned char *mac)
{
- int err = 0;
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
@@ -1408,7 +1406,7 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
list_add_tail(&spec_l2->list, rule_list_h);
- return err;
+ return 0;
}
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5716c3d2bb86..c72c4e1ea383 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
if (!err || err == -ENOSPC) {
priv->def_counter[port] = idx;
+ err = 0;
} else if (err == -ENOENT) {
err = 0;
continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
*idx = get_param_l(&out_param);
-
+ if (WARN_ON(err == -ENOSPC))
+ err = -EINVAL;
return err;
}
return __mlx4_counter_alloc(dev, idx);
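
In the first hunk, a -ENOSPC from the counter allocator still returns a usable default index, so the branch must clear err; otherwise the stale error value would escape the surrounding loop and fail the probe. A small sketch of that normalization, with hypothetical names:

#include <stdio.h>
#include <errno.h>

static int def_counter = -1;

static int alloc_counter(int *idx)
{
	*idx = 0;			/* pool exhausted: hand out the default index */
	return -ENOSPC;
}

static int allocate_default_counters(void)
{
	int err, idx;

	err = alloc_counter(&idx);
	if (!err || err == -ENOSPC) {
		def_counter = idx;	/* the fallback index is still valid */
		err = 0;		/* the missing reset the hunk adds */
	}
	return err;
}

int main(void)
{
	int err = allocate_default_counters();

	printf("err=%d def_counter=%d\n", err, def_counter);	/* err=0, not -28 */
	return 0;
}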
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
index c13260467750..82b185121edb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -5,7 +5,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include "en.h"
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 34cba97f7bf4..cede5bdfd598 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
}
cmd->ent_arr[ent->idx] = ent;
- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ /* no doorbell, no need to keep the entry */
+ free_ent(cmd, ent->idx);
+ if (ent->callback)
+ free_cmd(ent);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0864b76ca2c0..3bd64c63865b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -339,16 +339,6 @@ struct mlx5e_cq_decomp {
u16 wqe_counter;
} ____cacheline_aligned_in_smp;
-struct mlx5e_tx_wqe_info {
- struct sk_buff *skb;
- u32 num_bytes;
- u8 num_wqebbs;
- u8 num_dma;
-#ifdef CONFIG_MLX5_EN_TLS
- struct page *resync_dump_frag_page;
-#endif
-};
-
enum mlx5e_dma_map_type {
MLX5E_DMA_MAP_SINGLE,
MLX5E_DMA_MAP_PAGE
@@ -370,18 +360,6 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
-struct mlx5e_icosq_wqe_info {
- u8 opcode;
- u8 num_wqebbs;
-
- /* Auxiliary data for different opcodes. */
- union {
- struct {
- struct mlx5e_rq *rq;
- } umr;
- };
-};
-
struct mlx5e_txqsq {
/* data path */
@@ -484,11 +462,6 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_wqe_info {
- u8 num_wqebbs;
- u8 num_pkts;
-};
-
struct mlx5e_xdp_mpwqe {
/* Current MPWQE session */
struct mlx5e_tx_wqe *wqe;
@@ -919,8 +892,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 89fe65593c16..dce2bbbf9109 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -27,6 +27,11 @@
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+enum mlx5e_icosq_wqe_type {
+ MLX5E_ICOSQ_WQE_NOP,
+ MLX5E_ICOSQ_WQE_UMR_RX,
+};
+
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
@@ -81,6 +86,16 @@ mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
return wqe;
}
+struct mlx5e_tx_wqe_info {
+ struct sk_buff *skb;
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+#ifdef CONFIG_MLX5_EN_TLS
+ struct page *resync_dump_frag_page;
+#endif
+};
+
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -109,6 +124,18 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+struct mlx5e_icosq_wqe_info {
+ u8 wqe_type;
+ u8 num_wqebbs;
+
+ /* Auxiliary data for different wqe types. */
+ union {
+ struct {
+ struct mlx5e_rq *rq;
+ } umr;
+ };
+};
+
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -125,7 +152,7 @@ static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
for (; wi < edge_wi; wi++) {
*wi = (struct mlx5e_icosq_wqe_info) {
- .opcode = MLX5_OPCODE_NOP,
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
.num_wqebbs = 1,
};
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ed6f045febeb..e2e01f064c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -137,6 +137,11 @@ mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
}
+struct mlx5e_xdp_wqe_info {
+ u8 num_wqebbs;
+ u8 num_pkts;
+};
+
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index a6f65d4b2f36..fac145dcf2ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,33 +102,49 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
udp_hdr(skb)->len = htons(payload_len);
}
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+struct mlx5e_accel_tx_state {
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_tx_tls_state tls;
+#endif
+};
+
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *state)
{
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
- if (unlikely(!skb))
- return NULL;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ return false;
}
#endif
+ return true;
+}
+
+static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
- if (unlikely(!skb))
- return NULL;
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+ return false;
}
#endif
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- mlx5e_udp_gso_handle_tx_skb(skb);
-
- return skb;
+ return true;
}
#endif /* __MLX5E_EN_ACCEL_H__ */
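
The old mlx5e_accel_handle_tx() had to pass the WQE and producer index by reference because TLS resync can post WQEs of its own, moving the producer counter under the caller's feet. The split above makes the ordering explicit: mlx5e_accel_tx_begin() runs before the slot is fetched and may post WQEs; mlx5e_accel_tx_finish() runs after and may only edit the fetched descriptor. A userspace sketch of that contract, all names hypothetical:

#include <stdio.h>

#define RING_SZ 8

struct ring {
	unsigned int pc;			/* producer counter */
	unsigned int slots[RING_SZ];
};

static void accel_begin(struct ring *r)
{
	r->slots[r->pc++ & (RING_SZ - 1)] = 0xa11;	/* may post its own WQE */
}

static void accel_finish(unsigned int *slot)
{
	*slot |= 0x1000;			/* may only edit the fetched WQE */
}

int main(void)
{
	struct ring r = { 0 };
	unsigned int *slot;

	accel_begin(&r);			/* 1: may move pc */
	slot = &r.slots[r.pc & (RING_SZ - 1)];	/* 2: fetch the slot only after pc settles */
	accel_finish(slot);			/* 3: patch the chosen descriptor */
	printf("pc=%u slot=%#x\n", r.pc, *slot);
	return 0;
}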
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 0dd17514caae..824b87ac8f9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,11 +233,10 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -245,7 +244,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct sec_path *sp;
if (!xo)
- return skb;
+ return true;
sp = skb_sec_path(skb);
if (unlikely(sp->len != 1)) {
@@ -276,16 +275,16 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
- mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+ mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
- return skb;
+ return true;
drop:
kfree_skb(skb);
- return NULL;
+ return false;
}
static inline struct xfrm_state *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index db84500b024f..ba02643586a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,9 +52,9 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb);
#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 9daaec244385..dabbc5f226ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "accel/tls.h"
+#include "en_accel/tls_rxtx.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
@@ -95,10 +96,9 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index ba973937f0b5..3cd78d9503c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -108,10 +108,11 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = num_wqebbs;
- wi->num_bytes = num_bytes;
- wi->resync_dump_frag_page = page;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_bytes = num_bytes,
+ .resync_dump_frag_page = page,
+ };
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -134,14 +135,14 @@ post_static_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS;
struct mlx5e_umr_wqe *umr_wqe;
- u16 pi;
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -149,14 +150,14 @@ post_progress_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -166,8 +167,6 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
{
bool progress_fence = skip_static_post || !fence_first_post;
- mlx5e_txqsq_get_next_pi(sq, MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS);
-
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
@@ -274,6 +273,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
int fsz;
u16 pi;
+ BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);
@@ -342,7 +342,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_sq_stats *stats = sq->stats;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- u8 num_wqebbs;
int i = 0;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
@@ -371,9 +370,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
return MLX5E_KTLS_SYNC_DONE;
}
- num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
- mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -413,35 +409,18 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wqe_ctrl_seg *cseg;
- struct tls_context *tls_ctx;
- int datalen;
u32 seq;
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- goto out;
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
stats->tls_ctx++;
}
@@ -452,31 +431,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
/* fall-through */
- default: /* MLX5E_KTLS_SYNC_FAIL */
+ case MLX5E_KTLS_SYNC_FAIL:
goto err_out;
}
}
priv_tx->expected_seq = seq + datalen;
- cseg = &(*wqe)->ctrl;
- cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
+ state->tls_tisn = priv_tx->tisn;
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
out:
- return skb;
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
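
tx_fill_wi() above, like several later hunks in this series, swaps a memset() plus individual stores for a single compound-literal assignment: members not named in the initializer are implicitly zeroed, so the zeroing and the field writes cannot drift apart. A minimal standalone illustration:

#include <stdio.h>

struct wqe_info {
	unsigned int num_bytes;
	unsigned int num_wqebbs;
	unsigned int num_dma;
};

int main(void)
{
	struct wqe_info wi;

	wi = (struct wqe_info) {
		.num_wqebbs = 1,	/* every unnamed member becomes 0 */
	};
	printf("%u %u %u\n", wi.num_bytes, wi.num_wqebbs, wi.num_dma);	/* 0 1 0 */
	return 0;
}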
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 1d7ddeb7a46b..05454a843b28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -184,18 +184,17 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
nskb->queue_mapping = skb->queue_mapping;
}
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi,
- struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
+ u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -217,7 +216,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
if (likely(payload <= -info.sync_len))
/* SKB payload doesn't require offload
*/
- return skb;
+ return true;
atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
goto err_out;
@@ -247,21 +246,19 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
- return skb;
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
@@ -270,41 +267,45 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
- if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
- skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
- goto out;
- }
-
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ return true;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
- goto out;
+ return true;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != netdev))
- goto out;
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+ return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
- if (unlikely(expected_seq != skb_seq)) {
- skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
- goto out;
- }
+ if (unlikely(expected_seq != skb_seq))
+ return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
dev_kfree_skb_any(skb);
- skb = NULL;
- goto out;
+ return false;
}
context->expected_seq = skb_seq + datalen;
-out:
- return skb;
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
}
static int tls_update_resync_sn(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 90bc1f2384c8..a50d0394df0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,11 +40,14 @@
#include "en.h"
#include "en/txrx.h"
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi);
+struct mlx5e_accel_tx_tls_state {
+ u32 tls_tisn;
+};
+
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 048a4f8601a8..0a9dfc31de3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1364,13 +1364,12 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *nop;
- wi = &sq->db.wqe_info[pi];
+ sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
@@ -1482,20 +1481,21 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
/* Pre initialize fixed WQE fields */
for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
+ sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 1,
+ };
+
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
dseg->lkey = sq->mkey_be;
-
- wi->num_wqebbs = 1;
- wi->num_pkts = 1;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2de54d865dc8..1eac7a53d56f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1773,19 +1773,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
- int err = mlx5e_init_rep_rx(priv);
-
- if (err)
- return err;
-
mlx5e_create_q_counters(priv);
- return 0;
+ return mlx5e_init_rep_rx(priv);
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
- mlx5e_destroy_q_counters(priv);
mlx5e_cleanup_rep_rx(priv);
+ mlx5e_destroy_q_counters(priv);
}
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d9a5a669b84d..779600bebcca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -505,9 +505,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
MLX5_OPCODE_UMR);
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
- sq->db.wqe_info[pi].opcode = MLX5_OPCODE_UMR;
- sq->db.wqe_info[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
- sq->db.wqe_info[pi].umr.rq = rq;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .umr.rq = rq,
+ };
+
sq->pc += MLX5E_UMR_WQEBBS;
sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -616,15 +619,18 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
}
- if (likely(wi->opcode == MLX5_OPCODE_UMR))
+ switch (wi->wqe_type) {
+ case MLX5E_ICOSQ_WQE_UMR_RX:
wi->umr.rq->mpwqe.umr_completed++;
- else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+ break;
+ case MLX5E_ICOSQ_WQE_NOP:
+ break;
+ default:
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- wi->opcode);
-
+ "Bad WQE type in ICOSQ WQE info: 0x%x\n",
+ wi->wqe_type);
+ }
} while (!last_wqe);
-
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
sq->cc = sqcc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 77397aa66810..a050808f2128 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1097,7 +1097,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table\n");
+ "Failed to create tc offload table");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
return PTR_ERR(priv->fs.tc.t);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 583e1b201b75..f79454746d0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -265,8 +265,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -373,32 +373,38 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
+ goto out;
+
pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- /* might send skbs and update wqe and pi */
- skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
- if (unlikely(!skb))
- return NETDEV_TX_OK;
+ /* May update the WQE, but may not post other WQEs. */
+ if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
+ goto out;
- return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+ mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+
+out:
+ return NETDEV_TX_OK;
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -568,9 +574,8 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more)
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
struct mlx5i_tx_wqe *wqe;
@@ -648,12 +653,10 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 869fd58a6775..8480278f2ee2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,11 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
struct mlx5e_tx_wqe *nopwqe;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- sq->db.wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->db.wqe_info[pi].num_wqebbs = 1;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 781c1d184b60..57ac2ef52e80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
MLX5_FLOW_NAMESPACE_KERNEL, 1,
modact);
if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
esw_warn(dev, "Failed to create restore mod header, err: %d\n",
err);
- err = PTR_ERR(mod_hdr);
goto err_mod_hdr;
}
@@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
+ mutex_init(&esw->fdb_table.offloads.vports.lock);
+ hash_init(esw->fdb_table.offloads.vports.table);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
- return err;
+ goto create_acl_err;
err = esw_create_offloads_table(esw, total_vports);
if (err)
@@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
- mutex_init(&esw->fdb_table.offloads.vports.lock);
- hash_init(esw->fdb_table.offloads.vports.table);
-
return 0;
create_fg_err:
@@ -2253,18 +2252,19 @@ create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
-
+create_acl_err:
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
- mutex_destroy(&esw->fdb_table.offloads.vports.lock);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
@@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err_vports:
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
- esw_set_passing_vport_metadata(esw, false);
-err_steering_init:
esw_offloads_steering_cleanup(esw);
+err_steering_init:
+ esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 068578be00f1..035bd21e5d4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -663,7 +663,9 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
struct mlx5_ib_ah *mah = to_mah(address);
struct mlx5i_priv *ipriv = epriv->ppriv;
- return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+ mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+ return NETDEV_TX_OK;
}
static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 7844ab5d0ce7..c4aa47018c0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -113,9 +113,8 @@ struct mlx5i_tx_wqe {
#define MLX5I_SQ_FETCH_WQE(sq, pi) \
((struct mlx5i_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5i_tx_wqe)))
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more);
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index c6ad5ca46877..874c70e8cc54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -42,7 +42,7 @@
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
-static DEFINE_MUTEX(lag_mutex);
+static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
@@ -274,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!dev0 || !dev1)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
tracker = ldev->tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -458,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->tracker = tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -502,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
if (fn >= MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -510,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -525,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
@@ -607,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -621,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -635,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -664,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -681,12 +681,36 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave)
+{
+ struct mlx5_lag *ldev;
+ u8 port = 0;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev_get(dev);
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
+ goto unlock;
+
+ if (ldev->pf[MLX5_LAG_P1].netdev == slave)
+ port = MLX5_LAG_P1;
+ else
+ port = MLX5_LAG_P2;
+
+ port = ldev->v2p_map[port];
+
+unlock:
+ spin_unlock(&lag_lock);
+ return port;
+}
+EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
@@ -723,7 +747,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
@@ -733,6 +757,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
+ spin_unlock(&lag_lock);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
@@ -742,14 +767,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
out);
if (ret)
- goto unlock;
+ goto free;
for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}
-unlock:
- mutex_unlock(&lag_mutex);
+free:
kvfree(out);
return ret;
}
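
The mutex-to-spinlock conversion lets the mlx5_lag_is_*() queries run from atomic context, but it also forces the slow path to change shape: mlx5_lag_query_cong_counters() now snapshots the device pointers under the lock and drops it before issuing the (sleeping) firmware commands. A userspace sketch of that copy-out pattern, hypothetical names, compile with -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static int shared_ports[2] = { 1, 2 };

static void query_counters(void)
{
	int ports[2];

	pthread_spin_lock(&lock);
	ports[0] = shared_ports[0];	/* copy shared state out under the lock */
	ports[1] = shared_ports[1];
	pthread_spin_unlock(&lock);	/* released before any slow work */

	for (int i = 0; i < 2; i++)	/* stand-in for the sleeping FW command */
		printf("querying port %d\n", ports[i]);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	query_counters();
	return 0;
}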
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index c4ed25bb9ac8..b8d97d44be7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -693,6 +693,12 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -753,6 +759,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
+ cq->mcq.comp = dr_cq_complete;
+
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -763,7 +771,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
*cq->mcq.set_ci_db = 0;
- *cq->mcq.arm_db = 0;
+
+ /* Set a non-zero value to keep the HW from running doorbell recovery
+ * on a CQ that is used in polling mode.
+ */
+ *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
cq->mcq.vector = 0;
cq->mcq.irqn = irqn;
cq->mcq.uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a12ca673c224..147a5634244b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -636,7 +636,11 @@ struct mlxsw_sp_acl_rule_info {
/* spectrum_flow.c */
struct mlxsw_sp_flow_block {
struct list_head binding_list;
- struct list_head mall_list;
+ struct {
+ struct list_head list;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ } mall;
struct mlxsw_sp_acl_ruleset *ruleset_zero;
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
@@ -739,6 +743,9 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
@@ -887,7 +894,8 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_matchall.c */
-int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
@@ -895,6 +903,8 @@ int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -912,6 +922,10 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index c61f78e30397..47da9ee0045d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -51,6 +51,8 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
+ unsigned int min_prio;
+ unsigned int max_prio;
unsigned long priv[];
/* priv has to be always the last item */
};
@@ -178,7 +180,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_init;
err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
- tmplt_elusage);
+ tmplt_elusage, &ruleset->min_prio,
+ &ruleset->max_prio);
if (err)
goto err_ops_ruleset_add;
@@ -293,6 +296,14 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ *p_min_prio = ruleset->min_prio;
+ *p_max_prio = ruleset->max_prio;
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
struct mlxsw_afa_block *afa_block)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 430da69003d8..5c020403342f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -179,6 +179,8 @@ struct mlxsw_sp_acl_tcam_vgroup {
bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage;
bool vregion_rehash_enabled;
+ unsigned int *p_min_prio;
+ unsigned int *p_max_prio;
};
struct mlxsw_sp_acl_tcam_rehash_ctx {
@@ -316,13 +318,17 @@ mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage,
- bool vregion_rehash_enabled)
+ bool vregion_rehash_enabled,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
int err;
vgroup->patterns = patterns;
vgroup->patterns_count = patterns_count;
vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+ vgroup->p_min_prio = p_min_prio;
+ vgroup->p_max_prio = p_max_prio;
if (tmplt_elusage) {
vgroup->tmplt_elusage_set = true;
@@ -416,6 +422,21 @@ mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
return vchunk->priority;
}
+static void
+mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (list_empty(&vgroup->vregion_list))
+ return;
+ vregion = list_first_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ vregion = list_last_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
+}
+
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
@@ -986,8 +1007,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct list_head *pos;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,8 +1047,16 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
- list_add_tail(&vchunk->list, &vregion->vchunk_list);
+
+ /* Position the vchunk inside the list according to priority */
+ list_for_each(pos, &vregion->vchunk_list) {
+ vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+ if (vchunk2->priority > priority)
+ break;
+ }
+ list_add_tail(&vchunk->list, pos);
mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
return vchunk;
@@ -1058,6 +1088,7 @@ mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vchunk_ht_params);
mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
kfree(vchunk);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}
static struct mlxsw_sp_acl_tcam_vchunk *
@@ -1574,14 +1605,17 @@ static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, true);
+ tmplt_elusage, true,
+ p_min_prio, p_max_prio);
}
static void
@@ -1690,7 +1724,9 @@ static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
int err;
@@ -1698,7 +1734,8 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, false);
+ tmplt_elusage, false,
+ p_min_prio, p_max_prio);
if (err)
return err;
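
The vchunk hunk earlier in this file replaces an unconditional list_add_tail() with an insertion sorted by priority, so the first and last list entries always carry the group's minimum and maximum priorities and mlxsw_sp_acl_tcam_vgroup_prio_update() can read them in O(1). A userspace sketch of the ordered insert with hypothetical types (the kernel version walks with list_for_each() and inserts via list_add_tail(pos)):

#include <stdio.h>
#include <stdlib.h>

struct vchunk {
	unsigned int priority;
	struct vchunk *next;
};

static void insert_sorted(struct vchunk **head, struct vchunk *vc)
{
	struct vchunk **pos = head;

	while (*pos && (*pos)->priority <= vc->priority)
		pos = &(*pos)->next;	/* stop at the first higher priority */
	vc->next = *pos;
	*pos = vc;
}

int main(void)
{
	struct vchunk *head = NULL;
	unsigned int prios[] = { 30, 10, 20 };

	for (int i = 0; i < 3; i++) {
		struct vchunk *vc = calloc(1, sizeof(*vc));

		vc->priority = prios[i];
		insert_sorted(&head, vc);
	}
	for (struct vchunk *vc = head; vc; vc = vc->next)
		printf("%u ", vc->priority);	/* prints: 10 20 30 */
	printf("\n");
	return 0;
}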
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 96437992b102..a41df10ade9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -42,7 +42,8 @@ struct mlxsw_sp_acl_profile_ops {
size_t ruleset_priv_size;
int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage);
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
index ecab581ff956..47b66f347ff1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -18,7 +18,7 @@ mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
if (!block)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
- INIT_LIST_HEAD(&block->mall_list);
+ INIT_LIST_HEAD(&block->mall.list);
block->mlxsw_sp = mlxsw_sp;
block->net = net;
return block;
@@ -135,9 +135,11 @@ static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
struct tc_cls_matchall_offload *f)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return mlxsw_sp_mall_replace(flow_block, f);
+ return mlxsw_sp_mall_replace(mlxsw_sp, flow_block, f);
case TC_CLSMATCHALL_DESTROY:
mlxsw_sp_mall_destroy(flow_block, f);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 89c2e9820e95..b286fe158820 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -36,7 +36,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
if (err)
return err;
- } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
+ } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
+ act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
return -EOPNOTSUPP;
}
@@ -504,6 +505,34 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->common.extack);
}
+static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ unsigned int mall_min_prio;
+ unsigned int mall_max_prio;
+ int err;
+
+ err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
+ &mall_min_prio, &mall_max_prio);
+ if (err) {
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+ if (ingress && f->common.prio <= mall_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (!ingress && f->common.prio >= mall_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
@@ -513,6 +542,10 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule;
int err;
+ err = mlxsw_sp_flower_mall_prio_check(block, f);
+ if (err)
+ return err;
+
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
@@ -646,3 +679,23 @@ void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+ chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ /* In case there are no flower rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return PTR_ERR(ruleset);
+ mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
+ return 0;
+}
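The two checks added in this file and in spectrum_matchall.c below are duals of each other: mlxsw evaluates matchall rules before flower rules on ingress and after them on egress, so a new flower rule may not be placed in front of the lowest-priority matchall rule on ingress, nor behind the highest-priority one on egress (lower numbers bind earlier). A standalone sketch of that acceptance rule, with illustrative names rather than driver API:

#include <stdbool.h>

static bool flower_prio_ok(bool ingress, unsigned int prio,
                           unsigned int mall_min, unsigned int mall_max)
{
        if (ingress)
                return prio > mall_min;  /* must stay behind matchall */
        return prio < mall_max;          /* must stay in front of matchall */
}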
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
index da1c05f44cec..f1a44a8eda55 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -23,6 +23,7 @@ struct mlxsw_sp_mall_mirror_entry {
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
+ unsigned int priority;
enum mlxsw_sp_mall_action_type type;
bool ingress;
union {
@@ -37,7 +38,7 @@ mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie
{
struct mlxsw_sp_mall_entry *mall_entry;
- list_for_each_entry(mall_entry, &block->mall_list, list)
+ list_for_each_entry(mall_entry, &block->mall.list, list)
if (mall_entry->cookie == cookie)
return mall_entry;
@@ -175,13 +176,33 @@ mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
+static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ if (list_empty(&block->mall.list))
+ return;
+ block->mall.min_prio = UINT_MAX;
+ block->mall.max_prio = 0;
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ if (mall_entry->priority < block->mall.min_prio)
+ block->mall.min_prio = mall_entry->priority;
+ if (mall_entry->priority > block->mall.max_prio)
+ block->mall.max_prio = mall_entry->priority;
+ }
+}
+
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f)
{
struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_mall_entry *mall_entry;
__be16 protocol = f->common.protocol;
struct flow_action_entry *act;
+ unsigned int flower_min_prio;
+ unsigned int flower_max_prio;
+ bool flower_prio_valid;
int err;
if (!flow_offload_has_one_action(&f->rule->action)) {
@@ -199,19 +220,56 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
return -EOPNOTSUPP;
}
+ err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
+ &flower_min_prio, &flower_max_prio);
+ if (err) {
+ if (err != -ENOENT) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+ flower_prio_valid = false;
+ /* No flower filters are installed in the specified chain. */
+ } else {
+ flower_prio_valid = true;
+ }
+
mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
if (!mall_entry)
return -ENOMEM;
mall_entry->cookie = f->cookie;
+ mall_entry->priority = f->common.prio;
mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
act = &f->rule->action.entries[0];
if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
mall_entry->mirror.to_dev = act->dev;
} else if (act->id == FLOW_ACTION_SAMPLE &&
protocol == htons(ETH_P_ALL)) {
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
err = -EOPNOTSUPP;
@@ -239,7 +297,8 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
block->egress_blocker_rule_count++;
else
block->ingress_blocker_rule_count++;
- list_add_tail(&mall_entry->list, &block->mall_list);
+ list_add_tail(&mall_entry->list, &block->mall.list);
+ mlxsw_sp_mall_prio_update(block);
return 0;
rollback:
@@ -272,6 +331,7 @@ void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
list_for_each_entry(binding, &block->binding_list, list)
mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
+ mlxsw_sp_mall_prio_update(block);
}
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
@@ -280,7 +340,7 @@ int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_mall_entry *mall_entry;
int err;
- list_for_each_entry(mall_entry, &block->mall_list, list) {
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
if (err)
goto rollback;
@@ -288,7 +348,7 @@ int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
return 0;
rollback:
- list_for_each_entry_continue_reverse(mall_entry, &block->mall_list,
+ list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
list)
mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
return err;
@@ -299,6 +359,20 @@ void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
{
struct mlxsw_sp_mall_entry *mall_entry;
- list_for_each_entry(mall_entry, &block->mall_list, list)
+ list_for_each_entry(mall_entry, &block->mall.list, list)
mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}
+
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio)
+{
+ if (chain_index || list_empty(&block->mall.list))
+ /* In case there are no matchall rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return -ENOENT;
+ *p_min_prio = block->mall.min_prio;
+ *p_max_prio = block->mall.max_prio;
+ return 0;
+}
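mlxsw_sp_mall_prio_update() above recomputes the extrema from scratch after every insert and delete rather than maintaining them incrementally; when the list empties, the stale values are never read because mlxsw_sp_mall_prio_get() gates on list_empty(). A minimal userspace sketch of the same bookkeeping:

#include <limits.h>
#include <stddef.h>

struct entry {
        unsigned int priority;
        struct entry *next;
};

static void prio_update(const struct entry *head,
                        unsigned int *min_prio, unsigned int *max_prio)
{
        if (!head)
                return;         /* leave stale extrema; callers check emptiness */

        *min_prio = UINT_MAX;
        *max_prio = 0;
        for (; head; head = head->next) {
                if (head->priority < *min_prio)
                        *min_prio = head->priority;
                if (head->priority > *max_prio)
                        *max_prio = head->priority;
        }
}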
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 1f496fac7033..5bd7fb917b7a 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -17,11 +17,6 @@
#include "encx24j600_hw.h"
-static inline bool is_bits_set(int value, int mask)
-{
- return (value & mask) == mask;
-}
-
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
int bank)
{
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 39925e4bf2ec..fccc4805247f 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -604,9 +604,8 @@ static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
}
}
-static int encx24j600_hw_init(struct encx24j600_priv *priv)
+static void encx24j600_hw_init(struct encx24j600_priv *priv)
{
- int ret = 0;
u16 macon2;
priv->hw_enabled = false;
@@ -649,8 +648,6 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv)
if (netif_msg_hw(priv))
encx24j600_dump_config(priv, "Hw is initialized");
-
- return ret;
}
static void encx24j600_hw_enable(struct encx24j600_priv *priv)
@@ -1042,12 +1039,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Initialize the device HW to the consistent state */
- if (encx24j600_hw_init(priv)) {
- netif_err(priv, probe, ndev,
- DRV_NAME ": HW initialization error\n");
- ret = -EIO;
- goto out_free;
- }
+ encx24j600_hw_init(priv);
kthread_init_worker(&priv->kworker);
kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index e1651756bf9d..49fd843c4c8a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -331,14 +331,15 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void *desc;
unsigned int len;
unsigned int tx_head;
u32 txdes1;
- int ret = NETDEV_TX_BUSY;
+ netdev_tx_t ret = NETDEV_TX_BUSY;
spin_lock_irq(&priv->txlock);
@@ -564,7 +565,7 @@ static int moxart_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
unregister_netdev(ndev);
- free_irq(ndev->irq, ndev);
+ devm_free_irq(&pdev->dev, ndev->irq, ndev);
moxart_mac_free_memory(ndev);
free_netdev(ndev);
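For context, moxart's probe requests this line with devm_request_irq() and ndev as the dev_id, so remove() has to release it through devres as well: with the old bare free_irq(), devres would free the IRQ action a second time during device teardown, after free_netdev() has already run. A sketch of the corrected ordering (example_remove is illustrative):

static int example_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        unregister_netdev(ndev);
        /* matches devm_request_irq() in probe; releasing here, before
         * free_netdev(), also prevents a double free of the IRQ action
         */
        devm_free_irq(&pdev->dev, ndev->irq, ndev);
        moxart_mac_free_memory(ndev);
        free_netdev(ndev);

        return 0;
}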
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a2b9b85612a4..c798431ce2a1 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1030,10 +1030,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
{
int i, j;
- /* Loop through all the mac tables entries. There are 1024 rows of 4
- * entries.
- */
- for (i = 0; i < 1024; i++) {
+ /* Loop through all the mac tables entries. */
+ for (i = 0; i < ocelot->num_mact_rows; i++) {
for (j = 0; j < 4; j++) {
struct ocelot_mact_entry entry;
bool is_static;
@@ -1458,8 +1456,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
{
- ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
- ANA_AUTOAGE);
+ unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
+
+ /* Setting AGE_PERIOD to zero effectively disables automatic aging,
+ * which is clearly not our intention, so avoid that.
+ */
+ if (!age_period)
+ age_period = 1;
+
+ ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
}
EXPORT_SYMBOL(ocelot_set_ageing_time);
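The divisor change implies AGE_PERIOD counts 2-second ticks, so the old msecs / 2 overshot the requested ageing time by a factor of 1000. A small runnable sketch of the conversion and the zero clamp:

#include <assert.h>

/* msecs -> AGE_PERIOD ticks, assuming one tick == 2000 ms as the new
 * divisor implies; never return 0, which would disable ageing entirely
 */
static unsigned int age_period_ticks(unsigned int msecs)
{
        unsigned int ticks = msecs / 2000;

        return ticks ? ticks : 1;
}

int main(void)
{
        assert(age_period_ticks(300000) == 150); /* bridge default: 5 min */
        assert(age_period_ticks(500) == 1);      /* clamp, don't disable */
        return 0;
}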
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index ed4dd01a41ad..81d81ff75646 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -433,6 +433,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->shared_queue_sz = 224 * 1024;
+ ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
ret = ocelot_regfields_init(ocelot, ocelot_regfields);
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index d326e231f0ad..b7baf7624e18 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -48,7 +48,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported\n");
+ "Only one policer per port is supported");
return -EEXIST;
}
@@ -59,7 +59,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
return err;
}
@@ -73,7 +73,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer\n");
+ "Could not delete policer");
return err;
}
priv->tc.police_id = 0;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2616fd735aab..e1e1f4e3639e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1174,18 +1174,6 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
mb();
}
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
-{
- struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
-
- if ((skb->protocol == htons(ETH_P_8021Q)) &&
- (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
- vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
- skb->csum = hw_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
-}
-
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index bfa0c0d39600..8b018ed37b1b 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -208,11 +208,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 9183b3e85d21..354efffac0f9 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
eth_hw_addr_random(nn->dp.netdev);
+ nfp_nsp_close(nsp);
return;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 79d72c88bbef..b3cabc274121 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -299,6 +299,20 @@ static void nfp_repr_clean(struct nfp_repr *repr)
nfp_port_free(repr->port);
}
+static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+
+static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+}
+
+static void nfp_repr_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+}
+
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
@@ -308,6 +322,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
+ nfp_repr_set_lockdep_class(netdev);
+
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
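The reason reprs need their own lockdep class: a repr's ndo_start_xmit() runs with the repr's _xmit_lock held and then re-enters the stack through the lower PF netdev, taking a second _xmit_lock; if both locks share a class, lockdep reports false recursive locking. A sketch of the nested path (example_* names are illustrative, not nfp API):

static netdev_tx_t example_repr_xmit(struct sk_buff *skb,
                                     struct net_device *repr_dev)
{
        struct net_device *lower_dev = example_get_lower_dev(repr_dev);

        /* repr_dev's _xmit_lock is already held here; dev_queue_xmit()
         * below takes lower_dev's _xmit_lock, nesting the two classes
         */
        skb->dev = lower_dev;
        return dev_queue_xmit(skb);
}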
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2fdd0753b3af..d2708a57f2ff 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -502,7 +502,8 @@ static int nixge_check_tx_bd_space(struct nixge_priv *priv,
return 0;
}
-static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 311454d9b0bc..d3cbb4215f5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1030,7 +1030,8 @@ static int lpc_eth_close(struct net_device *ndev)
return 0;
}
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
u32 len, txidx;
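This is the same signature fix as in moxart and nixge above: the core types .ndo_start_xmit as returning netdev_tx_t, and a plain int return defeats that type checking. A skeleton of the expected shape (example_* names are illustrative):

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *ndev)
{
        struct example_priv *priv = netdev_priv(ndev);

        if (!example_tx_ring_has_room(priv))
                return NETDEV_TX_BUSY;  /* core will retry the skb */

        /* ... map the skb and post it to hardware ... */
        return NETDEV_TX_OK;
}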
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 5f8fc58d42b3..11621ccc1faf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -170,8 +170,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
- debugfs_create_u8("done_color", 0400, cq_dentry,
- (u8 *)&cq->done_color);
+ debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index f4ae40ae1e53..d83eff0ae0ac 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -388,6 +388,19 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
}
/* LIF commands */
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver)
+{
+ union ionic_dev_cmd cmd = {
+ .q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
+ .q_identify.lif_type = lif_type,
+ .q_identify.type = qtype,
+ .q_identify.ver = qver,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
union ionic_dev_cmd cmd = {
@@ -431,6 +444,7 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
.q_init.opcode = IONIC_CMD_Q_INIT,
.q_init.lif_index = cpu_to_le16(lif_index),
.q_init.type = q->type,
+ .q_init.ver = qcq->q.lif->qtype_info[q->type].version,
.q_init.index = cpu_to_le32(q->index),
.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
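A sketch of how a caller might drive the new identify command, patterned on the driver's existing dev-cmd users; ionic_dev_cmd_wait(), dev_cmd_lock, the dev_cmd_regs->data response window and DEVCMD_TIMEOUT are assumed from that pattern, and union ionic_q_identity is the response layout added to ionic_if.h below:

static int example_qtype_identify(struct ionic *ionic, u16 lif_type,
                                  u8 qtype, u8 qver,
                                  union ionic_q_identity *qti)
{
        struct ionic_dev *idev = &ionic->idev;
        int err;

        mutex_lock(&ionic->dev_cmd_lock);
        ionic_dev_cmd_queue_identify(idev, lif_type, qtype, qver);
        err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
        if (!err)
                memcpy_fromio(qti, &idev->dev_cmd_regs->data, sizeof(*qti));
        mutex_unlock(&ionic->dev_cmd_lock);

        return err;
}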
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 587398b01997..525434f10025 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,7 +12,8 @@
#define IONIC_MIN_MTU ETH_MIN_MTU
#define IONIC_MAX_MTU 9194
-#define IONIC_MAX_TXRX_DESC 16384
+#define IONIC_MAX_TX_DESC 8192
+#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
@@ -83,6 +84,8 @@ static_assert(sizeof(struct ionic_q_init_cmd) == 64);
static_assert(sizeof(struct ionic_q_init_comp) == 16);
static_assert(sizeof(struct ionic_q_control_cmd) == 64);
static_assert(sizeof(ionic_q_control_comp) == 16);
+static_assert(sizeof(struct ionic_q_identify_cmd) == 64);
+static_assert(sizeof(struct ionic_q_identify_comp) == 16);
static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64);
static_assert(sizeof(ionic_rx_mode_set_comp) == 16);
@@ -179,7 +182,7 @@ struct ionic_desc_info {
void *cb_arg;
};
-#define QUEUE_NAME_MAX_SZ 32
+#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
u64 dbell_count;
@@ -204,14 +207,14 @@ struct ionic_queue {
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
- char name[QUEUE_NAME_MAX_SZ];
+ char name[IONIC_QUEUE_NAME_MAX_SZ];
};
-#define INTR_INDEX_NOT_ASSIGNED -1
-#define INTR_NAME_MAX_SZ 32
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
struct ionic_intr_info {
- char name[INTR_NAME_MAX_SZ];
+ char name[IONIC_INTR_NAME_MAX_SZ];
unsigned int index;
unsigned int vector;
u64 rearm_count;
@@ -283,6 +286,8 @@ void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 6996229facfd..f7e3ce3de04d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -12,10 +12,11 @@
#include "ionic_stats.h"
static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define PRIV_F_SW_DBG_STATS BIT(0)
+#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
"sw-dbg-stats",
};
-#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
+
+#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
@@ -58,7 +59,7 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
count = ionic_get_stats_count(lif);
break;
case ETH_SS_PRIV_FLAGS:
- count = PRIV_FLAGS_COUNT;
+ count = IONIC_PRIV_FLAGS_COUNT;
break;
}
return count;
@@ -75,7 +76,7 @@ static void ionic_get_strings(struct net_device *netdev,
break;
case ETH_SS_PRIV_FLAGS:
memcpy(buf, ionic_priv_flags_strings,
- PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
+ IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
break;
}
}
@@ -159,6 +160,8 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
break;
+ case IONIC_XCVR_PID_QSFP_100G_CWDM4:
+ case IONIC_XCVR_PID_QSFP_100G_PSM4:
case IONIC_XCVR_PID_QSFP_100G_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
@@ -178,6 +181,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
break;
case IONIC_XCVR_PID_SFP_25GBASE_SR:
case IONIC_XCVR_PID_SFP_25GBASE_AOC:
+ case IONIC_XCVR_PID_SFP_25GBASE_ACC:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
@@ -458,9 +462,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
- ring->tx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->tx_max_pending = IONIC_MAX_TX_DESC;
ring->tx_pending = lif->ntxq_descs;
- ring->rx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
}
@@ -554,7 +558,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
u32 priv_flags = 0;
if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= PRIV_F_SW_DBG_STATS;
+ priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
return priv_flags;
}
@@ -564,7 +568,7 @@ static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct ionic_lif *lif = netdev_priv(netdev);
clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & PRIV_F_SW_DBG_STATS)
+ if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index ceeb7629e7a0..7e22ba4ed915 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
-/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */
+/* Copyright (c) 2017-2020 Pensando Systems, Inc. All rights reserved. */
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
@@ -9,7 +9,7 @@
#define IONIC_IFNAMSIZ 16
/**
- * Commands
+ * enum ionic_cmd_opcode - Device commands
*/
enum ionic_cmd_opcode {
IONIC_CMD_NOP = 0,
@@ -40,6 +40,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_RX_FILTER_DEL = 32,
/* Queue commands */
+ IONIC_CMD_Q_IDENTIFY = 39,
IONIC_CMD_Q_INIT = 40,
IONIC_CMD_Q_CONTROL = 41,
@@ -57,6 +58,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
IONIC_CMD_QOS_CLASS_RESET = 242,
+ IONIC_CMD_QOS_CLASS_UPDATE = 243,
/* Firmware commands */
IONIC_CMD_FW_DOWNLOAD = 254,
@@ -64,7 +66,7 @@ enum ionic_cmd_opcode {
};
/**
- * Command Return codes
+ * enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
IONIC_RC_SUCCESS = 0, /* Success */
@@ -97,6 +99,7 @@ enum ionic_notifyq_opcode {
IONIC_EVENT_RESET = 2,
IONIC_EVENT_HEARTBEAT = 3,
IONIC_EVENT_LOG = 4,
+ IONIC_EVENT_XCVR = 5,
};
/**
@@ -114,12 +117,11 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @cmd_data: Command-specific bytes.
- * @color: Color bit. (Always 0 for commands issued to the
- * Device Cmd Registers.)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @cmd_data: Command-specific bytes
+ * @color: Color bit (Always 0 for commands issued to the
+ * Device Cmd Registers)
*/
struct ionic_admin_comp {
u8 status;
@@ -146,7 +148,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_nop_comp {
u8 status;
@@ -156,7 +158,7 @@ struct ionic_nop_comp {
/**
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
- * @type: device type
+ * @type: Device type
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -166,7 +168,7 @@ struct ionic_dev_init_cmd {
/**
 * struct ionic_dev_init_comp - Device init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -184,7 +186,7 @@ struct ionic_dev_reset_cmd {
/**
 * struct ionic_dev_reset_comp - Reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -205,8 +207,8 @@ struct ionic_dev_identify_cmd {
};
/**
- * struct dev_identify_comp - Driver/device identify command completion
- * @status: The status of the command (enum status_code)
+ * struct ionic_dev_identify_comp - Driver/device identify command completion
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_dev_identify_comp {
@@ -225,8 +227,8 @@ enum ionic_os_type {
};
/**
- * union drv_identity - driver identity information
- * @os_type: OS type (see enum os_type)
+ * union ionic_drv_identity - driver identity information
+ * @os_type: OS type (see enum ionic_os_type)
* @os_dist: OS distribution, numeric format
* @os_dist_str: OS distribution, string format
* @kernel_ver: Kernel version, numeric format
@@ -242,26 +244,26 @@ union ionic_drv_identity {
char kernel_ver_str[32];
char driver_ver_str[32];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * union dev_identity - device identity information
+ * union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
* @nports: Number of ports provisioned
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
- * @intr_coal_mult: Interrupt coalescing multiplication factor.
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- * @intr_coal_div: Interrupt coalescing division factor.
+ * @intr_coal_div: Interrupt coalescing division factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- *
+ * @eq_count: Number of shared event queues
*/
union ionic_dev_identity {
struct {
@@ -275,8 +277,9 @@ union ionic_dev_identity {
__le32 ndbpgs_per_lif;
__le32 intr_coal_mult;
__le32 intr_coal_div;
+ __le32 eq_count;
};
- __le32 words[512];
+ __le32 words[478];
};
enum ionic_lif_type {
@@ -286,10 +289,10 @@ enum ionic_lif_type {
};
/**
- * struct ionic_lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - LIF identify command
* @opcode: opcode
- * @type: lif type (enum lif_type)
- * @ver: version of identify returned by device
+ * @type: LIF type (enum ionic_lif_type)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -299,9 +302,9 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct ionic_lif_identify_comp - lif identify command completion
- * @status: status of the command (enum status_code)
- * @ver: version of identify returned by device
+ * struct ionic_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -309,13 +312,24 @@ struct ionic_lif_identify_comp {
u8 rsvd2[14];
};
+/**
+ * enum ionic_lif_capability - LIF capabilities
+ * @IONIC_LIF_CAP_ETH: LIF supports Ethernet
+ * @IONIC_LIF_CAP_RDMA: LIF supports RDMA
+ */
enum ionic_lif_capability {
IONIC_LIF_CAP_ETH = BIT(0),
IONIC_LIF_CAP_RDMA = BIT(1),
};
/**
- * Logical Queue Types
+ * enum ionic_logical_qtype - Logical Queue Types
+ * @IONIC_QTYPE_ADMINQ: Administrative Queue
+ * @IONIC_QTYPE_NOTIFYQ: Notify Queue
+ * @IONIC_QTYPE_RXQ: Receive Queue
+ * @IONIC_QTYPE_TXQ: Transmit Queue
+ * @IONIC_QTYPE_EQ: Event Queue
+ * @IONIC_QTYPE_MAX: Max queue type supported
*/
enum ionic_logical_qtype {
IONIC_QTYPE_ADMINQ = 0,
@@ -327,10 +341,10 @@ enum ionic_logical_qtype {
};
/**
- * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
- * @qtype: Hardware Queue Type.
- * @qid_count: Number of Queue IDs of the logical type.
- * @qid_base: Minimum Queue ID of the logical type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
+ * @qtype: Hardware Queue Type
+ * @qid_count: Number of Queue IDs of the logical type
+ * @qid_base: Minimum Queue ID of the logical type
*/
struct ionic_lif_logical_qtype {
u8 qtype;
@@ -339,6 +353,12 @@ struct ionic_lif_logical_qtype {
__le32 qid_base;
};
+/**
+ * enum ionic_lif_state - LIF state
+ * @IONIC_LIF_DISABLE: LIF disabled
+ * @IONIC_LIF_ENABLE: LIF enabled
+ * @IONIC_LIF_HANG_RESET: LIF hung, being reset
+ */
enum ionic_lif_state {
IONIC_LIF_DISABLE = 0,
IONIC_LIF_ENABLE = 1,
@@ -346,13 +366,13 @@ enum ionic_lif_state {
};
/**
- * LIF configuration
- * @state: lif state (enum lif_state)
- * @name: lif name
- * @mtu: mtu
- * @mac: station mac address
- * @features: features (enum ionic_eth_hw_features)
- * @queue_count: queue counts per queue-type
+ * union ionic_lif_config - LIF configuration
+ * @state: LIF state (enum ionic_lif_state)
+ * @name: LIF name
+ * @mtu: MTU
+ * @mac: Station MAC address
+ * @features: Features (enum ionic_eth_hw_features)
+ * @queue_count: Queue counts per queue-type
*/
union ionic_lif_config {
struct {
@@ -369,37 +389,36 @@ union ionic_lif_config {
};
/**
- * struct ionic_lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - LIF identity information (type-specific)
*
- * @capabilities LIF capabilities
+ * @capabilities: LIF capabilities
*
- * Ethernet:
- * @version: Ethernet identify structure version.
- * @features: Ethernet features supported on this lif type.
- * @max_ucast_filters: Number of perfect unicast addresses supported.
- * @max_mcast_filters: Number of perfect multicast addresses supported.
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximim size of frames to be sent
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth: Ethernet identify structure
+ * @version: Ethernet identify structure version
+ * @max_ucast_filters: Number of perfect unicast addresses supported
+ * @max_mcast_filters: Number of perfect multicast addresses supported
+ * @min_frame_size: Minimum size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @config: LIF config struct with features, mtu, mac, q counts
*
- * RDMA:
- * @version: RDMA version of opcodes and queue descriptors.
- * @qp_opcodes: Number of rdma queue pair opcodes supported.
- * @admin_opcodes: Number of rdma admin opcodes supported.
- * @npts_per_lif: Page table size per lif
- * @nmrs_per_lif: Number of memory regions per lif
- * @nahs_per_lif: Number of address handles per lif
- * @max_stride: Max work request stride.
- * @cl_stride: Cache line stride.
- * @pte_stride: Page table entry stride.
- * @rrq_stride: Remote RQ work request stride.
- * @rsq_stride: Remote SQ work request stride.
+ * @rdma: RDMA identify structure
+ * @version: RDMA version of opcodes and queue descriptors
+ * @qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @admin_opcodes: Number of RDMA admin opcodes supported
+ * @npts_per_lif: Page table size per LIF
+ * @nmrs_per_lif: Number of memory regions per LIF
+ * @nahs_per_lif: Number of address handles per LIF
+ * @max_stride: Max work request stride
+ * @cl_stride: Cache line stride
+ * @pte_stride: Page table entry stride
+ * @rrq_stride: Remote RQ work request stride
+ * @rsq_stride: Remote SQ work request stride
* @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype.
- * @sq_qtype: RDMA Send Qtype.
- * @rq_qtype: RDMA Receive Qtype.
- * @cq_qtype: RDMA Completion Qtype.
- * @eq_qtype: RDMA Event Qtype.
+ * @aq_qtype: RDMA Admin Qtype
+ * @sq_qtype: RDMA Send Qtype
+ * @rq_qtype: RDMA Receive Qtype
+ * @cq_qtype: RDMA Completion Qtype
+ * @eq_qtype: RDMA Event Qtype
*/
union ionic_lif_identity {
struct {
@@ -439,15 +458,15 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype eq_qtype;
} __packed rdma;
} __packed;
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_lif_init_cmd - LIF init command
- * @opcode: opcode
- * @type: LIF type (enum lif_type)
+ * @opcode: Opcode
+ * @type: LIF type (enum ionic_lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct ionic_lif_info)
+ * @info_pa: Destination address for LIF info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -460,7 +479,8 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
+ * @hw_index: Hardware index of the initialized LIF
*/
struct ionic_lif_init_comp {
u8 status;
@@ -469,14 +489,74 @@ struct ionic_lif_init_comp {
u8 rsvd2[12];
};
+/**
+ * struct ionic_q_identify_cmd - queue identify command
+ * @opcode: opcode
+ * @lif_type: LIF type (enum ionic_lif_type)
+ * @type: Logical queue type (enum ionic_logical_qtype)
+ * @ver: Highest queue type version that the driver supports
+ */
+struct ionic_q_identify_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_type;
+ u8 type;
+ u8 ver;
+ u8 rsvd2[58];
+};
+
+/**
+ * struct ionic_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ */
+struct ionic_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd2[11];
+};
+
+/**
+ * union ionic_q_identity - queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @sg_desc_sz: Scatter/Gather descriptor size
+ * @max_sg_elems: Maximum number of Scatter/Gather elements
+ * @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ */
+union ionic_q_identity {
+ struct {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define IONIC_QIDENT_F_CQ 0x01 /* queue has completion ring */
+#define IONIC_QIDENT_F_SG 0x02 /* queue has scatter/gather ring */
+#define IONIC_QIDENT_F_EQ 0x04 /* queue can use event queue */
+#define IONIC_QIDENT_F_CMB 0x08 /* queue is in cmb bar */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ __le16 sg_desc_sz;
+ __le16 max_sg_elems;
+ __le16 sg_desc_stride;
+ };
+ __le32 words[478];
+};
+
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
- * @ver: Queue version (defines opcode/descriptor scope)
+ * @ver: Queue type version
* @lif_index: LIF index
- * @index: (lif, qtype) relative admin queue index
- * @intr_index: Interrupt control register index
+ * @index: (LIF, qtype) relative admin queue index
+ * @intr_index: Interrupt control register index, or Event queue index
* @pid: Process ID
* @flags:
* IRQ: Interrupt requested on completion
@@ -494,12 +574,11 @@ struct ionic_lif_init_comp {
* descriptors. Values of ring_size <2 and >16 are
* reserved.
* EQ: Enable the Event Queue
- * @cos: Class of service for this queue.
+ * @cos: Class of service for this queue
* @ring_size: Queue ring size, encoded as a log2(size)
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
- * @eq_index: Event queue index
*/
struct ionic_q_init_cmd {
u8 opcode;
@@ -516,29 +595,27 @@ struct ionic_q_init_cmd {
#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */
#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */
#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */
-#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
+#define IONIC_QINIT_F_CMB 0x10 /* Enable cmb-based queue */
+#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
u8 cos;
u8 ring_size;
__le64 ring_base;
__le64 cq_ring_base;
__le64 sg_ring_base;
- __le32 eq_index;
- u8 rsvd2[16];
+ u8 rsvd2[20];
} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
- * @status: The status of the command (enum status_code)
- * @ver: Queue version (defines opcode/descriptor scope)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
* @color: Color
*/
struct ionic_q_init_comp {
u8 status;
- u8 ver;
+ u8 rsvd;
__le16 comp_index;
__le32 hw_index;
u8 hw_type;
@@ -559,10 +636,9 @@ enum ionic_txq_desc_opcode {
/**
* struct ionic_txq_desc - Ethernet Tx queue descriptor format
- * @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
+ * @cmd: Tx operation, see IONIC_TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
- *
* Non-offload send. No segmentation,
* fragmentation or checksum calc/insertion is
* performed by device; packet is prepared
@@ -570,7 +646,6 @@ enum ionic_txq_desc_opcode {
* no further manipulation from device.
*
* IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
- *
* Offload 16-bit L4 checksum
* calculation/insertion. The device will
* calculate the L4 checksum value and
@@ -579,14 +654,16 @@ enum ionic_txq_desc_opcode {
* is calculated starting at @csum_start bytes
* into the packet to the end of the packet.
* The checksum insertion position is given
- * in @csum_offset. This feature is only
- * applicable to protocols such as TCP, UDP
- * and ICMP where a standard (i.e. the
- * 'IP-style' checksum) one's complement
- * 16-bit checksum is used, using an IP
- * pseudo-header to seed the calculation.
- * Software will preload the L4 checksum
- * field with the IP pseudo-header checksum.
+ * in @csum_offset, which is the offset from
+ * @csum_start to the checksum field in the L4
+ * header. This feature is only applicable to
+ * protocols such as TCP, UDP and ICMP where a
+ * standard (i.e. the 'IP-style' checksum)
+ * one's complement 16-bit checksum is used,
+ * using an IP pseudo-header to seed the
+ * calculation. Software will preload the L4
+ * checksum field with the IP pseudo-header
+ * checksum.
*
* For tunnel encapsulation, @csum_start and
* @csum_offset refer to the inner L4
@@ -602,7 +679,6 @@ enum ionic_txq_desc_opcode {
* for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
- *
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
 * updated. Similarly, if @csum_l4 is set, the L4
@@ -610,7 +686,6 @@ enum ionic_txq_desc_opcode {
* checksums are also updated.
*
* IONIC_TXQ_DESC_OPCODE_TSO:
- *
 * Device performs TCP segmentation offload
* (TSO). @hdr_len is the number of bytes
* to the end of TCP header (the offset to
@@ -637,40 +712,41 @@ enum ionic_txq_desc_opcode {
* clear CWR in remaining segments.
* @flags:
* vlan:
- * Insert an L2 VLAN header using @vlan_tci.
+ * Insert an L2 VLAN header using @vlan_tci
* encap:
- * Calculate encap header checksum.
+ * Calculate encap header checksum
* csum_l3:
- * Compute L3 header checksum.
+ * Compute L3 header checksum
* csum_l4:
- * Compute L4 header checksum.
+ * Compute L4 header checksum
* tso_sot:
* TSO start
* tso_eot:
* TSO end
* @num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address.
- * (Subsequent data buffers are on txq_sg_desc).
+ * @addr: First data buffer's DMA address
+ * (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
* @hdr_len: Length of packet headers, including
- * encapsulating outer header, if applicable.
- * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and
- * TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ * encapsulating outer header, if applicable.
+ * Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
+ * IONIC_TXQ_DESC_OPCODE_TSO. Should be set to zero for
* all other modes. For
- * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
+ * IONIC_TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
* of headers up to inner-most L4 header. For
- * TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ * IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
- * @mss: Desired MSS value for TSO. Only applicable for
- * TXQ_DESC_OPCODE_TSO.
- * @csum_start: Offset into inner-most L3 header of checksum
- * @csum_offset: Offset into inner-most L4 header of checksum
+ * @mss: Desired MSS value for TSO; only applicable for
+ * IONIC_TXQ_DESC_OPCODE_TSO
+ * @csum_start: Offset from packet to first byte checked in L4 checksum
+ * @csum_offset: Offset from csum_start to L4 checksum field
*/
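The sharpened @csum_start/@csum_offset wording lines up one-to-one with Linux's CHECKSUM_PARTIAL model, where the same two offsets already live on the skb. An illustrative mapping (the fields named here belong to struct ionic_txq_desc as defined just below):

static void example_fill_partial_csum(const struct sk_buff *skb,
                                      struct ionic_txq_desc *desc)
{
        /* bytes from start of packet to where checksumming begins */
        desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
        /* bytes from csum_start to the L4 header's checksum field */
        desc->csum_offset = cpu_to_le16(skb->csum_offset);
}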
-
+struct ionic_txq_desc {
+ __le64 cmd;
#define IONIC_TXQ_DESC_OPCODE_MASK 0xf
#define IONIC_TXQ_DESC_OPCODE_SHIFT 4
#define IONIC_TXQ_DESC_FLAGS_MASK 0xf
@@ -692,8 +768,6 @@ enum ionic_txq_desc_opcode {
#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4
#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8
-struct ionic_txq_desc {
- __le64 cmd;
__le16 len;
union {
__le16 vlan_tci;
@@ -733,28 +807,38 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
*addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
};
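For reference, building @cmd is the mirror image of the decode above; a sketch assembled from the same mask/shift defines in this header (the header carries an equivalent encode helper; the example_ name is illustrative):

static inline u64 example_encode_txq_desc_cmd(u8 opcode, u8 flags,
                                              u8 nsge, u64 addr)
{
        u64 cmd;

        cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << IONIC_TXQ_DESC_OPCODE_SHIFT;
        cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << IONIC_TXQ_DESC_FLAGS_SHIFT;
        cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT;
        cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT;

        return cmd;
}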
-#define IONIC_TX_MAX_SG_ELEMS 8
-#define IONIC_RX_MAX_SG_ELEMS 8
-
/**
- * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_txq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_txq_sg_desc {
- struct ionic_txq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_TX_MAX_SG_ELEMS];
+#define IONIC_TX_MAX_SG_ELEMS 8
+#define IONIC_TX_SG_DESC_STRIDE 8
+ struct ionic_txq_sg_elem elems[IONIC_TX_MAX_SG_ELEMS];
+};
+
+struct ionic_txq_sg_desc_v1 {
+#define IONIC_TX_MAX_SG_ELEMS_V1 15
+#define IONIC_TX_SG_DESC_STRIDE_V1 16
+ struct ionic_txq_sg_elem elems[IONIC_TX_SG_DESC_STRIDE_V1];
};
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @color: Color bit.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @color: Color bit
*/
struct ionic_txq_comp {
u8 status;
@@ -771,16 +855,15 @@ enum ionic_rxq_desc_opcode {
/**
* struct ionic_rxq_desc - Ethernet Rx queue descriptor format
- * @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
- *
- * RXQ_DESC_OPCODE_SIMPLE:
+ * @opcode: Rx operation, see IONIC_RXQ_DESC_OPCODE_*:
*
+ * IONIC_RXQ_DESC_OPCODE_SIMPLE:
* Receive full packet into data buffer
* starting at @addr. Results of
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
- * @len: Data buffer's length, in bytes.
+ * @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
struct ionic_rxq_desc {
@@ -791,26 +874,33 @@ struct ionic_rxq_desc {
};
/**
- * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_rxq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_rxq_sg_desc {
- struct ionic_rxq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_RX_MAX_SG_ELEMS];
+#define IONIC_RX_MAX_SG_ELEMS 8
+#define IONIC_RX_SG_DESC_STRIDE 8
+ struct ionic_rxq_sg_elem elems[IONIC_RX_SG_DESC_STRIDE];
};
/**
* struct ionic_rxq_comp - Ethernet receive queue completion descriptor
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @rss_hash: 32-bit RSS hash
- * @csum: 16-bit sum of the packet's L2 payload.
+ * @csum: 16-bit sum of the packet's L2 payload
* If the packet's L2 payload is odd length, an extra
* zero-value byte is included in the @csum calculation but
* not included in @len.
@@ -818,33 +908,51 @@ struct ionic_rxq_sg_desc {
* set. Includes .1p and .1q tags.
* @len: Received packet length, in bytes. Excludes FCS.
 * @csum_calc: L2 payload checksum is computed or not
- * @csum_tcp_ok: The TCP checksum calculated by the device
- * matched the checksum in the receive packet's
- * TCP header
- * @csum_tcp_bad: The TCP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * TCP header.
- * @csum_udp_ok: The UDP checksum calculated by the device
- * matched the checksum in the receive packet's
- * UDP header
- * @csum_udp_bad: The UDP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * UDP header.
- * @csum_ip_ok: The IPv4 checksum calculated by the device
- * matched the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
- * @csum_ip_bad: The IPv4 checksum calculated by the device did
- * not match the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for both IP headers.
- * @VLAN: VLAN header was stripped and placed in @vlan_tci.
- * @pkt_type: Packet type
- * @color: Color bit.
+ * @csum_flags: See IONIC_RXQ_COMP_CSUM_F_*:
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_OK:
+ * The TCP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_BAD:
+ * The TCP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_OK:
+ * The UDP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * UDP header
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_BAD:
+ * The UDP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * UDP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_OK:
+ * The IPv4 checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for the both IPv4 headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_BAD:
+ * The IPv4 checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IP headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_VLAN:
+ * The VLAN header was stripped and placed in @vlan_tci.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_CALC:
+ * The checksum was calculated by the device.
+ *
+ * @pkt_type_color: Packet type and color bit; see IONIC_RXQ_COMP_PKT_TYPE_MASK
*/
struct ionic_rxq_comp {
u8 status;
@@ -891,8 +999,8 @@ enum ionic_eth_hw_features {
IONIC_ETH_HW_TSO_ECN = BIT(10),
IONIC_ETH_HW_TSO_GRE = BIT(11),
IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12),
- IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
- IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
+ IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
+ IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
IONIC_ETH_HW_TSO_UDP = BIT(15),
IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16),
};
@@ -923,7 +1031,10 @@ enum q_control_oper {
};
/**
- * Physical connection type
+ * enum ionic_phy_type - Physical connection type
+ * @IONIC_PHY_TYPE_NONE: No PHY installed
+ * @IONIC_PHY_TYPE_COPPER: Copper PHY
+ * @IONIC_PHY_TYPE_FIBER: Fiber PHY
*/
enum ionic_phy_type {
IONIC_PHY_TYPE_NONE = 0,
@@ -932,18 +1043,23 @@ enum ionic_phy_type {
};
/**
- * Transceiver status
+ * enum ionic_xcvr_state - Transceiver status
+ * @IONIC_XCVR_STATE_REMOVED: Transceiver removed
+ * @IONIC_XCVR_STATE_INSERTED: Transceiver inserted
+ * @IONIC_XCVR_STATE_PENDING: Transceiver pending
+ * @IONIC_XCVR_STATE_SPROM_READ: Transceiver data read
+ * @IONIC_XCVR_STATE_SPROM_READ_ERR: Transceiver data read error
*/
enum ionic_xcvr_state {
IONIC_XCVR_STATE_REMOVED = 0,
IONIC_XCVR_STATE_INSERTED = 1,
IONIC_XCVR_STATE_PENDING = 2,
IONIC_XCVR_STATE_SPROM_READ = 3,
- IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
+ IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
/**
- * Supported link modes
+ * enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
IONIC_XCVR_PID_UNKNOWN = 0,
@@ -977,64 +1093,83 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_10GBASE_CU = 68,
IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69,
IONIC_XCVR_PID_QSFP_100G_PSM4 = 70,
+ IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
};
/**
- * Port types
+ * enum ionic_port_type - Port types
+ * @IONIC_PORT_TYPE_NONE: Port type not configured
+ * @IONIC_PORT_TYPE_ETH: Port carries ethernet traffic (inband)
+ * @IONIC_PORT_TYPE_MGMT: Port carries mgmt traffic (out-of-band)
*/
enum ionic_port_type {
- IONIC_PORT_TYPE_NONE = 0, /* port type not configured */
- IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */
- IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */
+ IONIC_PORT_TYPE_NONE = 0,
+ IONIC_PORT_TYPE_ETH = 1,
+ IONIC_PORT_TYPE_MGMT = 2,
};
/**
- * Port config state
+ * enum ionic_port_admin_state - Port config state
+ * @IONIC_PORT_ADMIN_STATE_NONE: Port admin state not configured
+ * @IONIC_PORT_ADMIN_STATE_DOWN: Port admin disabled
+ * @IONIC_PORT_ADMIN_STATE_UP: Port admin enabled
*/
enum ionic_port_admin_state {
- IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */
- IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */
- IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */
+ IONIC_PORT_ADMIN_STATE_NONE = 0,
+ IONIC_PORT_ADMIN_STATE_DOWN = 1,
+ IONIC_PORT_ADMIN_STATE_UP = 2,
};
/**
- * Port operational status
+ * enum ionic_port_oper_status - Port operational status
+ * @IONIC_PORT_OPER_STATUS_NONE: Port disabled
+ * @IONIC_PORT_OPER_STATUS_UP: Port link status up
+ * @IONIC_PORT_OPER_STATUS_DOWN: Port link status down
*/
enum ionic_port_oper_status {
- IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */
- IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */
- IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */
+ IONIC_PORT_OPER_STATUS_NONE = 0,
+ IONIC_PORT_OPER_STATUS_UP = 1,
+ IONIC_PORT_OPER_STATUS_DOWN = 2,
};
/**
- * Ethernet Forward error correction (fec) modes
+ * enum ionic_port_fec_type - Ethernet Forward error correction (FEC) modes
+ * @IONIC_PORT_FEC_TYPE_NONE: FEC Disabled
+ * @IONIC_PORT_FEC_TYPE_FC: FireCode FEC
+ * @IONIC_PORT_FEC_TYPE_RS: ReedSolomon FEC
*/
enum ionic_port_fec_type {
- IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */
- IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */
- IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */
+ IONIC_PORT_FEC_TYPE_NONE = 0,
+ IONIC_PORT_FEC_TYPE_FC = 1,
+ IONIC_PORT_FEC_TYPE_RS = 2,
};
/**
- * Ethernet pause (flow control) modes
+ * enum ionic_port_pause_type - Ethernet pause (flow control) modes
+ * @IONIC_PORT_PAUSE_TYPE_NONE: Disable Pause
+ * @IONIC_PORT_PAUSE_TYPE_LINK: Link level pause
+ * @IONIC_PORT_PAUSE_TYPE_PFC: Priority-Flow Control
*/
enum ionic_port_pause_type {
- IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */
- IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */
- IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */
+ IONIC_PORT_PAUSE_TYPE_NONE = 0,
+ IONIC_PORT_PAUSE_TYPE_LINK = 1,
+ IONIC_PORT_PAUSE_TYPE_PFC = 2,
};
/**
- * Loopback modes
+ * enum ionic_port_loopback_mode - Loopback modes
+ * @IONIC_PORT_LOOPBACK_MODE_NONE: Disable loopback
+ * @IONIC_PORT_LOOPBACK_MODE_MAC: MAC loopback
+ * @IONIC_PORT_LOOPBACK_MODE_PHY: PHY/SerDes loopback
*/
enum ionic_port_loopback_mode {
- IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */
- IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */
- IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */
+ IONIC_PORT_LOOPBACK_MODE_NONE = 0,
+ IONIC_PORT_LOOPBACK_MODE_MAC = 1,
+ IONIC_PORT_LOOPBACK_MODE_PHY = 2,
};
/**
- * Transceiver Status information
+ * struct ionic_xcvr_status - Transceiver Status information
* @state: Transceiver status (enum ionic_xcvr_state)
* @phy: Physical connection type (enum ionic_phy_type)
 * @pid: Transceiver link mode (enum ionic_xcvr_pid)
@@ -1048,7 +1183,7 @@ struct ionic_xcvr_status {
};
/**
- * Port configuration
+ * union ionic_port_config - Port configuration
* @speed: port speed (in Mbps)
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
@@ -1081,17 +1216,21 @@ union ionic_port_config {
};
/**
- * Port Status information
+ * struct ionic_port_status - Port Status information
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
+ * @link_down_count: number of times link went from up to down
+ * @fec_type: fec type (enum ionic_port_fec_type)
 * @xcvr: transceiver status
*/
struct ionic_port_status {
__le32 id;
__le32 speed;
u8 status;
- u8 rsvd[51];
+ __le16 link_down_count;
+ u8 fec_type;
+ u8 rsvd[48];
struct ionic_xcvr_status xcvr;
} __packed;
@@ -1110,7 +1249,7 @@ struct ionic_port_identify_cmd {
/**
* struct ionic_port_identify_comp - Port identify command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_port_identify_comp {
@@ -1135,7 +1274,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1155,7 +1294,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1163,15 +1302,23 @@ struct ionic_port_reset_comp {
};
/**
- * enum stats_ctl_cmd - List of commands for stats control
+ * enum ionic_stats_ctl_cmd - List of commands for stats control
+ * @IONIC_STATS_CTL_RESET: Reset statistics
*/
enum ionic_stats_ctl_cmd {
IONIC_STATS_CTL_RESET = 0,
};
-
/**
* enum ionic_port_attr - List of device attributes
+ * @IONIC_PORT_ATTR_STATE: Port state attribute
+ * @IONIC_PORT_ATTR_SPEED: Port speed attribute
+ * @IONIC_PORT_ATTR_MTU: Port MTU attribute
+ * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute
+ * @IONIC_PORT_ATTR_FEC: Port FEC attribute
+ * @IONIC_PORT_ATTR_PAUSE: Port pause attribute
+ * @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute
+ * @IONIC_PORT_ATTR_STATS_CTRL: Port statistics control attribute
*/
enum ionic_port_attr {
IONIC_PORT_ATTR_STATE = 0,
@@ -1186,9 +1333,17 @@ enum ionic_port_attr {
/**
* struct ionic_port_setattr_cmd - Set port attributes on the NIC
- * @opcode: Opcode
- * @index: port index
- * @attr: Attribute type (enum ionic_port_attr)
+ * @opcode: Opcode
+ * @index: Port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @stats_ctl: Port stats setting
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1203,14 +1358,14 @@ struct ionic_port_setattr_cmd {
u8 fec_type;
u8 pause_type;
u8 loopback_mode;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd2[60];
};
};
/**
* struct ionic_port_setattr_comp - Port set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1234,8 +1389,15 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
- * @status: The status of the command (enum status_code)
- * @color: Color bit
+ * @status: Status of the command (enum ionic_status_code)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @color: Color bit
*/
struct ionic_port_getattr_comp {
u8 status;
@@ -1254,12 +1416,12 @@ struct ionic_port_getattr_comp {
};
/**
- * struct ionic_lif_status - Lif status register
+ * struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
- * @port_num: port the lif is connected to
+ * @port_num: port the LIF is connected to
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
- * @link_down_count: number of times link status changes
+ * @link_down_count: number of times link went from up to down
*/
struct ionic_lif_status {
__le64 eid;
@@ -1293,6 +1455,9 @@ enum ionic_dev_state {
/**
* enum ionic_dev_attr - List of device attributes
+ * @IONIC_DEV_ATTR_STATE: Device state attribute
+ * @IONIC_DEV_ATTR_NAME: Device name attribute
+ * @IONIC_DEV_ATTR_FEATURES: Device feature attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1322,7 +1487,7 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1349,7 +1514,7 @@ struct ionic_dev_getattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1379,6 +1544,13 @@ enum ionic_rss_hash_types {
/**
* enum ionic_lif_attr - List of LIF attributes
+ * @IONIC_LIF_ATTR_STATE: LIF state attribute
+ * @IONIC_LIF_ATTR_NAME: LIF name attribute
+ * @IONIC_LIF_ATTR_MTU: LIF MTU attribute
+ * @IONIC_LIF_ATTR_MAC: LIF MAC attribute
+ * @IONIC_LIF_ATTR_FEATURES: LIF features attribute
+ * @IONIC_LIF_ATTR_RSS: LIF RSS attribute
+ * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1393,18 +1565,18 @@ enum ionic_lif_attr {
/**
* struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum ionic_lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
- * @state: lif state (enum lif_state)
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: MTU
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types).
- * @key: The hash secret key.
- * @addr: Address for the indirection table shared memory.
- * @stats_ctl: stats control commands (enum stats_ctl_cmd)
+ * @types: The hash types to enable (see rss_hash_types)
+ * @key: The hash secret key
+ * @addr: Address for the indirection table shared memory
+ * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1422,16 +1594,15 @@ struct ionic_lif_setattr_cmd {
u8 rsvd[6];
__le64 addr;
} rss;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd[60];
} __packed;
};
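As an example, a minimal sketch of a LIF MTU change through this command; the admin-queue submission around it is elided, and lif_index/new_mtu are illustrative names:

/* Illustrative: set the MTU attribute on a LIF */
struct ionic_lif_setattr_cmd cmd = {
	.opcode = IONIC_CMD_LIF_SETATTR,
	.attr = IONIC_LIF_ATTR_MTU,
	.index = cpu_to_le16(lif_index),	/* illustrative */
	.mtu = cpu_to_le32(new_mtu),		/* illustrative */
};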
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
@@ -1461,10 +1632,9 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @state: lif state (enum lif_state)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: MTU
* @mac: Station mac
@@ -1486,11 +1656,12 @@ struct ionic_lif_getattr_comp {
};
enum ionic_rx_mode {
- IONIC_RX_MODE_F_UNICAST = BIT(0),
- IONIC_RX_MODE_F_MULTICAST = BIT(1),
- IONIC_RX_MODE_F_BROADCAST = BIT(2),
- IONIC_RX_MODE_F_PROMISC = BIT(3),
- IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_UNICAST = BIT(0),
+ IONIC_RX_MODE_F_MULTICAST = BIT(1),
+ IONIC_RX_MODE_F_BROADCAST = BIT(2),
+ IONIC_RX_MODE_F_PROMISC = BIT(3),
+ IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_RDMA_SNIFFER = BIT(5),
};
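A sketch of how a driver typically composes this mask from netdev flags (illustrative, not lifted from this patch):

/* Illustrative: build an rx_mode mask from netdev->flags */
unsigned int rx_mode = IONIC_RX_MODE_F_UNICAST |
		       IONIC_RX_MODE_F_MULTICAST |
		       IONIC_RX_MODE_F_BROADCAST;

if (netdev->flags & IFF_PROMISC)
	rx_mode |= IONIC_RX_MODE_F_PROMISC;
if (netdev->flags & IFF_ALLMULTI)
	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;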
/**
@@ -1498,11 +1669,12 @@ enum ionic_rx_mode {
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
- * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets.
- * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets.
- * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets.
- * IONIC_RX_MODE_F_PROMISC: Accept any packets.
- * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets.
+ * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
+ * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets
+ * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets
+ * IONIC_RX_MODE_F_PROMISC: Accept any packets
+ * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
+ * IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
@@ -1526,9 +1698,14 @@ enum ionic_rx_filter_match_type {
* @qtype: Queue type
* @lif_index: LIF index
* @qid: Queue ID
- * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx)
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
+ * @vlan: VLAN filter
+ * @vlan: VLAN ID
+ * @mac: MAC filter
+ * @addr: MAC address (network-byte order)
+ * @mac_vlan: MACVLAN filter
+ * @vlan: VLAN ID
+ * @addr: MAC address (network-byte order)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1553,11 +1730,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
- * @color: Color bit.
+ * @color: Color bit
*/
struct ionic_rx_filter_add_comp {
u8 status;
@@ -1584,63 +1760,6 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
-/**
- * struct ionic_qos_identify_cmd - QoS identify command
- * @opcode: opcode
- * @ver: Highest version of identify supported by driver
- *
- */
-struct ionic_qos_identify_cmd {
- u8 opcode;
- u8 ver;
- u8 rsvd[62];
-};
-
-/**
- * struct ionic_qos_identify_comp - QoS identify command completion
- * @status: The status of the command (enum status_code)
- * @ver: Version of identify returned by device
- */
-struct ionic_qos_identify_comp {
- u8 status;
- u8 ver;
- u8 rsvd[14];
-};
-
-#define IONIC_QOS_CLASS_MAX 7
-#define IONIC_QOS_CLASS_NAME_SZ 32
-#define IONIC_QOS_DSCP_MAX_VALUES 64
-
-/**
- * enum ionic_qos_class
- */
-enum ionic_qos_class {
- IONIC_QOS_CLASS_DEFAULT = 0,
- IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
- IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
- IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
- IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
- IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
- IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
-};
-
-/**
- * enum ionic_qos_class_type - Traffic classification criteria
- */
-enum ionic_qos_class_type {
- IONIC_QOS_CLASS_TYPE_NONE = 0,
- IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */
- IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */
-};
-
-/**
- * enum ionic_qos_sched_type - Qos class scheduling type
- */
-enum ionic_qos_sched_type {
- IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
- IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
-};
-
enum ionic_vf_attr {
IONIC_VF_ATTR_SPOOFCHK = 1,
IONIC_VF_ATTR_TRUST = 2,
@@ -1652,26 +1771,29 @@ enum ionic_vf_attr {
};
/**
- * VF link status
+ * enum ionic_vf_link_status - Virtual Function link status
+ * @IONIC_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @IONIC_VF_LINK_STATUS_UP: Link always up
+ * @IONIC_VF_LINK_STATUS_DOWN: Link always down
*/
enum ionic_vf_link_status {
- IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
- IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
- IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+ IONIC_VF_LINK_STATUS_AUTO = 0,
+ IONIC_VF_LINK_STATUS_UP = 1,
+ IONIC_VF_LINK_STATUS_DOWN = 2,
};
/**
* struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
- * macaddr mac address
- * vlanid vlan ID
- * maxrate max Tx rate in Mbps
- * spoofchk enable address spoof checking
- * trust enable VF trust
- * linkstate set link up or down
- * stats_pa set DMA address for VF stats
+ * @vf_index: VF index
+ * @macaddr: MAC address
+ * @vlanid: VLAN ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa: set DMA address for VF stats
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -1701,8 +1823,8 @@ struct ionic_vf_setattr_comp {
/**
* struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -1729,19 +1851,85 @@ struct ionic_vf_getattr_comp {
};
/**
- * union ionic_qos_config - Qos configuration structure
+ * struct ionic_qos_identify_cmd - QoS identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ *
+ */
+struct ionic_qos_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct ionic_qos_identify_comp - QoS identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_qos_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+#define IONIC_QOS_TC_MAX 8
+/* Capri max supported, should be renamed. */
+#define IONIC_QOS_CLASS_MAX 7
+#define IONIC_QOS_PCP_MAX 8
+#define IONIC_QOS_CLASS_NAME_SZ 32
+#define IONIC_QOS_DSCP_MAX 64
+#define IONIC_QOS_ALL_PCP 0xFF
+
+/**
+ * enum ionic_qos_class - QoS traffic classes
+ */
+enum ionic_qos_class {
+ IONIC_QOS_CLASS_DEFAULT = 0,
+ IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
+ IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
+ IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
+ IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
+ IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
+ IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
+};
+
+/**
+ * enum ionic_qos_class_type - Traffic classification criteria
+ * @IONIC_QOS_CLASS_TYPE_NONE: No QoS
+ * @IONIC_QOS_CLASS_TYPE_PCP: Dot1Q PCP
+ * @IONIC_QOS_CLASS_TYPE_DSCP: IP DSCP
+ */
+enum ionic_qos_class_type {
+ IONIC_QOS_CLASS_TYPE_NONE = 0,
+ IONIC_QOS_CLASS_TYPE_PCP = 1,
+ IONIC_QOS_CLASS_TYPE_DSCP = 2,
+};
+
+/**
+ * enum ionic_qos_sched_type - QoS class scheduling type
+ * @IONIC_QOS_SCHED_TYPE_STRICT: Strict priority
+ * @IONIC_QOS_SCHED_TYPE_DWRR: Deficit weighted round-robin
+ */
+enum ionic_qos_sched_type {
+ IONIC_QOS_SCHED_TYPE_STRICT = 0,
+ IONIC_QOS_SCHED_TYPE_DWRR = 1,
+};
+
+/**
+ * union ionic_qos_config - QoS configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
- * IONIC_QOS_CONFIG_F_DROP drop/nodrop
+ * IONIC_QOS_CONFIG_F_NO_DROP no-drop class enable
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
- * @class_type: Qos class type (enum ionic_qos_class_type)
- * @pause_type: Qos pause type (enum ionic_qos_pause_type)
- * @name: Qos class name
+ * @sched_type: QoS class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: QoS class type (enum ionic_qos_class_type)
+ * @pause_type: QoS pause type (enum ionic_qos_pause_type)
+ * @name: QoS class name
* @mtu: MTU of the class
- * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
- * @dwrr_weight: Qos class scheduling weight
+ * @pfc_cos: Priority-Flow Control class of service
+ * @dwrr_weight: QoS class scheduling weight
* @strict_rlmt: Rate limit for strict priority scheduling
* @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP)
* @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP)
@@ -1752,7 +1940,8 @@ struct ionic_vf_getattr_comp {
union ionic_qos_config {
struct {
#define IONIC_QOS_CONFIG_F_ENABLE BIT(0)
-#define IONIC_QOS_CONFIG_F_DROP BIT(1)
+#define IONIC_QOS_CONFIG_F_NO_DROP BIT(1)
+/* Used to rewrite PCP or DSCP value. */
#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2)
#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3)
u8 flags;
@@ -1769,6 +1958,7 @@ union ionic_qos_config {
__le64 strict_rlmt;
};
/* marking */
+ /* Used to rewrite PCP or DSCP value. */
union {
u8 rw_dot1q_pcp;
u8 rw_ip_dscp;
@@ -1778,7 +1968,7 @@ union ionic_qos_config {
u8 dot1q_pcp;
struct {
u8 ndscp;
- u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES];
+ u8 ip_dscp[IONIC_QOS_DSCP_MAX];
};
};
};
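A hedged sketch of one class built from these fields, assuming the kernel's designated-initializer style (all values illustrative):

/* Illustrative: an enabled, no-drop, DWRR-scheduled class keyed on PCP 3 */
union ionic_qos_config qcfg = {
	.flags = IONIC_QOS_CONFIG_F_ENABLE | IONIC_QOS_CONFIG_F_NO_DROP,
	.sched_type = IONIC_QOS_SCHED_TYPE_DWRR,
	.class_type = IONIC_QOS_CLASS_TYPE_PCP,
	.dwrr_weight = 50,
	.dot1q_pcp = 3,
};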
@@ -1797,15 +1987,15 @@ union ionic_qos_identity {
u8 version;
u8 type;
u8 rsvd[62];
- union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+ union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * struct qos_init_cmd - QoS config init command
+ * struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
- * @group: Qos class id
+ * @group: QoS class id
* @info_pa: destination address for qos info
*/
struct ionic_qos_init_cmd {
@@ -1819,8 +2009,9 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct ionic_qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
+ * @group: QoS class id
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -1847,10 +2038,16 @@ struct ionic_fw_download_cmd {
typedef struct ionic_admin_comp ionic_fw_download_comp;
+/**
+ * enum ionic_fw_control_oper - FW control operations
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ */
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0, /* Reset firmware */
- IONIC_FW_INSTALL = 1, /* Install firmware */
- IONIC_FW_ACTIVATE = 2, /* Activate firmware */
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
};
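These opers imply the usual update ordering: download the image, INSTALL it to a slot, then ACTIVATE it. A minimal sketch with the submission plumbing elided (the ordering is inferred from the enum, not spelled out in this patch):

/* Illustrative: install, then activate, via ionic_fw_control_cmd.oper */
struct ionic_fw_control_cmd cmd = {
	.opcode = IONIC_CMD_FW_CONTROL,
	.oper = IONIC_FW_INSTALL,
};
/* ... submit, read the slot back from ionic_fw_control_comp ... */
cmd.oper = IONIC_FW_ACTIVATE;
/* ... submit again to switch over to the new image ... */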
/**
@@ -1869,8 +2066,10 @@ struct ionic_fw_control_cmd {
/**
* struct ionic_fw_control_comp - Firmware control completion
- * @opcode: opcode
- * @slot: slot where the firmware was installed
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @slot: Slot where the firmware was installed
+ * @color: Color bit
*/
struct ionic_fw_control_comp {
u8 status;
@@ -1888,11 +2087,11 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
- * @lif_index: lif index
+ * @lif_index: LIF index
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
- * Nonzero status means the LIF does not support rdma.
+ * Nonzero status means the LIF does not support RDMA.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1904,30 +2103,29 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 51, 52, or 53 (create EQ, CQ, or admin queue)
- * @lif_index lif index
- * @qid_ver: (qid | (rdma version << 24))
+ * @lif_index: LIF index
+ * @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
* @dbid: doorbell page id
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
- * @xxx_table_index: temporary, but should not need pgtbl for contig. queues.
*
- * The same command struct is used to create an rdma event queue, completion
- * queue, or rdma admin queue. The cid is an interrupt number for an event
+ * The same command struct is used to create an RDMA event queue, completion
+ * queue, or RDMA admin queue. The cid is an interrupt number for an event
* queue, an event queue id for a completion queue, or a completion queue id
- * for an rdma admin queue.
+ * for an RDMA admin queue.
*
* The queue created via a dev command must be contiguous in dma space.
*
* The dev commands are intended only to be used during driver initialization,
- * to create queues supporting the rdma admin queue. Other queues, and other
- * types of rdma resources like memory regions, will be created and registered
- * via the rdma admin queue, and will support a more complete interface
+ * to create queues supporting the RDMA admin queue. Other queues, and other
+ * types of RDMA resources like memory regions, will be created and registered
+ * via the RDMA admin queue, and will support a more complete interface
* providing scatter gather lists for larger, scattered queue buffers and
* memory registration.
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
@@ -1940,8 +2138,7 @@ struct ionic_rdma_queue_cmd {
u8 depth_log2;
u8 stride_log2;
__le64 dma_addr;
- u8 rsvd2[36];
- __le32 xxx_table_index;
+ u8 rsvd2[40];
};
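A sketch of the cid overloading described above, filling the command for an event queue (every value and variable name here is illustrative):

/* Illustrative: cid carries the interrupt index when creating an EQ */
struct ionic_rdma_queue_cmd cmd = {
	.opcode = IONIC_CMD_RDMA_CREATE_EQ,
	.lif_index = cpu_to_le16(lif_index),
	.qid_ver = cpu_to_le32(qid | (rdma_version << 24)),
	.cid = cpu_to_le32(intr_index),
	.dbid = cpu_to_le16(dbid),
	.depth_log2 = 6,	/* 64 entries */
	.stride_log2 = 5,	/* 32-byte stride */
	.dma_addr = cpu_to_le64(q_base_pa),
};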
/******************************************************************
@@ -1949,7 +2146,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct ionic_notifyq_event
+ * struct ionic_notifyq_event - Generic event reporting structure
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1964,9 +2161,9 @@ struct ionic_notifyq_event {
};
/**
- * struct ionic_link_change_event
+ * struct ionic_link_change_event - Link change event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LINK_CHANGE
+ * @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
* @link_speed: speed of the network link
*
@@ -1981,9 +2178,9 @@ struct ionic_link_change_event {
};
/**
- * struct ionic_reset_event
+ * struct ionic_reset_event - Reset event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_RESET
+ * @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
*
@@ -1999,11 +2196,9 @@ struct ionic_reset_event {
};
/**
- * struct ionic_heartbeat_event
+ * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_HEARTBEAT
- *
- * Sent periodically by the NIC to indicate continued health
+ * @ecode: event code = IONIC_EVENT_HEARTBEAT
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2012,12 +2207,10 @@ struct ionic_heartbeat_event {
};
/**
- * struct ionic_log_event
+ * struct ionic_log_event - Sent to notify the driver of an internal error
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LOG
+ * @ecode: event code = IONIC_EVENT_LOG
* @data: log data
- *
- * Sent to notify the driver of an internal error.
*/
struct ionic_log_event {
__le64 eid;
@@ -2026,7 +2219,18 @@ struct ionic_log_event {
};
/**
- * struct ionic_port_stats
+ * struct ionic_xcvr_event - Transceiver change event
+ * @eid: event number
+ * @ecode: event code = IONIC_EVENT_XCVR
+ */
+struct ionic_xcvr_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 rsvd[54];
+};
+
+/**
+ * struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2131,28 +2335,61 @@ struct ionic_mgmt_port_stats {
__le64 frames_rx_multicast;
__le64 frames_rx_broadcast;
__le64 frames_rx_pause;
- __le64 frames_rx_bad_length0;
- __le64 frames_rx_undersized1;
- __le64 frames_rx_oversized2;
- __le64 frames_rx_fragments3;
- __le64 frames_rx_jabber4;
- __le64 frames_rx_64b5;
- __le64 frames_rx_65b_127b6;
- __le64 frames_rx_128b_255b7;
- __le64 frames_rx_256b_511b8;
- __le64 frames_rx_512b_1023b9;
- __le64 frames_rx_1024b_1518b0;
- __le64 frames_rx_gt_1518b1;
- __le64 frames_rx_fifo_full2;
- __le64 frames_tx_ok3;
- __le64 frames_tx_all4;
- __le64 frames_tx_bad5;
- __le64 octets_tx_ok6;
- __le64 octets_tx_total7;
- __le64 frames_tx_unicast8;
- __le64 frames_tx_multicast9;
- __le64 frames_tx_broadcast0;
- __le64 frames_tx_pause1;
+ __le64 frames_rx_bad_length;
+ __le64 frames_rx_undersized;
+ __le64 frames_rx_oversized;
+ __le64 frames_rx_fragments;
+ __le64 frames_rx_jabber;
+ __le64 frames_rx_64b;
+ __le64 frames_rx_65b_127b;
+ __le64 frames_rx_128b_255b;
+ __le64 frames_rx_256b_511b;
+ __le64 frames_rx_512b_1023b;
+ __le64 frames_rx_1024b_1518b;
+ __le64 frames_rx_gt_1518b;
+ __le64 frames_rx_fifo_full;
+ __le64 frames_tx_ok;
+ __le64 frames_tx_all;
+ __le64 frames_tx_bad;
+ __le64 octets_tx_ok;
+ __le64 octets_tx_total;
+ __le64 frames_tx_unicast;
+ __le64 frames_tx_multicast;
+ __le64 frames_tx_broadcast;
+ __le64 frames_tx_pause;
+};
+
+enum ionic_pb_buffer_drop_stats {
+ IONIC_BUFFER_INTRINSIC_DROP = 0,
+ IONIC_BUFFER_DISCARDED,
+ IONIC_BUFFER_ADMITTED,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP_2,
+ IONIC_BUFFER_OUT_OF_CREDIT_DROP,
+ IONIC_BUFFER_TRUNCATION_DROP,
+ IONIC_BUFFER_PORT_DISABLED_DROP,
+ IONIC_BUFFER_COPY_TO_CPU_TAIL_DROP,
+ IONIC_BUFFER_SPAN_TAIL_DROP,
+ IONIC_BUFFER_MIN_SIZE_VIOLATION_DROP,
+ IONIC_BUFFER_ENQUEUE_ERROR_DROP,
+ IONIC_BUFFER_INVALID_PORT_DROP,
+ IONIC_BUFFER_INVALID_OUTPUT_QUEUE_DROP,
+ IONIC_BUFFER_DROP_MAX,
+};
+
+/**
+ * struct ionic_port_pb_stats - packet buffer system stats
+ * drop_counts[] is indexed by enum ionic_pb_buffer_drop_stats
+ */
+struct ionic_port_pb_stats {
+ __le64 sop_count_in;
+ __le64 eop_count_in;
+ __le64 sop_count_out;
+ __le64 eop_count_out;
+ __le64 drop_counts[IONIC_BUFFER_DROP_MAX];
+ __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX];
+ __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX];
+ __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX];
};
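drop_counts[] is meant to be indexed with the enum above; for example (the pb_stats pointer is illustrative):

/* Illustrative: read one little-endian drop counter */
u64 trunc = le64_to_cpu(pb_stats->drop_counts[IONIC_BUFFER_TRUNCATION_DROP]);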
/**
@@ -2184,22 +2421,31 @@ union ionic_port_identity {
u8 rsvd2[44];
union ionic_port_config config;
};
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_port_info - port info structure
- * @port_status: port status
- * @port_stats: port stats
+ * @config: Port configuration data
+ * @status: Port status data
+ * @stats: Port statistics data
+ * @mgmt_stats: Port management statistics data
+ * @pb_stats: uplink packet buffer drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
struct ionic_port_status status;
- struct ionic_port_stats stats;
+ union {
+ struct ionic_port_stats stats;
+ struct ionic_mgmt_port_stats mgmt_stats;
+ };
+ /* room for pb_stats to start at 2k offset */
+ u8 rsvd[760];
+ struct ionic_port_pb_stats pb_stats;
};
/**
- * struct ionic_lif_stats
+ * struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
/* RX */
@@ -2252,7 +2498,7 @@ struct ionic_lif_stats {
__le64 tx_queue_error;
__le64 tx_desc_fetch_error;
__le64 tx_desc_data_error;
- __le64 rsvd9;
+ __le64 tx_queue_empty;
__le64 rsvd10;
__le64 rsvd11;
__le64 rsvd12;
@@ -2353,7 +2599,10 @@ struct ionic_lif_stats {
};
/**
- * struct ionic_lif_info - lif info structure
+ * struct ionic_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ * @stats: LIF statistics structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2389,7 +2638,9 @@ union ionic_dev_cmd {
struct ionic_qos_init_cmd qos_init;
struct ionic_qos_reset_cmd qos_reset;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
+ struct ionic_q_control_cmd q_control;
};
union ionic_dev_cmd_comp {
@@ -2421,19 +2672,20 @@ union ionic_dev_cmd_comp {
ionic_qos_init_comp qos_init;
ionic_qos_reset_comp qos_reset;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
};
/**
- * union dev_info - Device info register format (read-only)
- * @signature: Signature value of 0x44455649 ('DEVI').
- * @version: Current version of info.
- * @asic_type: Asic type.
- * @asic_rev: Asic revision.
- * @fw_status: Firmware status.
- * @fw_heartbeat: Firmware heartbeat counter.
- * @serial_num: Serial number.
- * @fw_version: Firmware version.
+ * union ionic_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: ASIC type
+ * @asic_rev: ASIC revision
+ * @fw_status: Firmware status
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -2454,10 +2706,10 @@ union ionic_dev_info_regs {
/**
* union ionic_dev_cmd_regs - Device command register format (read-write)
- * @doorbell: Device Cmd Doorbell, write-only.
+ * @doorbell: Device Cmd Doorbell, write-only
* Write a 1 to signal device to process cmd,
* poll done for completion.
- * @done: Done indicator, bit 0 == 1 when command is complete.
+ * @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
* @data: Opcode-specific side-data
@@ -2475,7 +2727,7 @@ union ionic_dev_cmd_regs {
};
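The doorbell/done handshake documented above reduces to roughly this loop; a sketch assuming regs maps the union (ioremapped) and the command bytes have already been written into cmd:

/* Illustrative: ring the doorbell, then poll bit 0 of done */
iowrite32(0, &regs->done);
iowrite32(1, &regs->doorbell);
while (!(ioread32(&regs->done) & 1))
	usleep_range(100, 200);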
/**
- * union ionic_dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
@@ -2490,6 +2742,7 @@ union ionic_dev_regs {
union ionic_adminq_cmd {
struct ionic_admin_cmd cmd;
struct ionic_nop_cmd nop;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
struct ionic_lif_setattr_cmd lif_setattr;
@@ -2506,6 +2759,7 @@ union ionic_adminq_cmd {
union ionic_adminq_comp {
struct ionic_admin_comp comp;
struct ionic_nop_comp nop;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
struct ionic_lif_setattr_comp lif_setattr;
struct ionic_lif_getattr_comp lif_getattr;
@@ -2531,14 +2785,14 @@ union ionic_adminq_comp {
/**
* struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
- * @ring: Selects the specific ring of the queue to update.
+ * @ring: Selects the specific ring of the queue to update
* Type-specific meaning:
- * ring=0: Default producer/consumer queue.
+ * ring=0: Default producer/consumer queue
* ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs
* send events to EQs when armed. EQs send
* interrupts when armed.
- * @qid: The queue id selects the queue destination for the
- * producer index and flags.
+ * @qid_lo: Queue destination for the producer index and flags (low bits)
+ * @qid_hi: Queue destination for the producer index and flags (high bits)
*/
struct ionic_doorbell {
__le16 p_index;
@@ -2571,6 +2825,7 @@ struct ionic_identity {
union ionic_lif_identity lif;
union ionic_port_identity port;
union ionic_qos_identity qos;
+ union ionic_q_identity txq;
};
#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 5acf4f46c268..80b4d8332109 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -17,6 +17,16 @@
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
+/* queuetype support level */
+static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
+ [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
+ [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
+ [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
+ [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
+ * 1 = ... with Tx SG version 1
+ */
+};
+
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
@@ -27,6 +37,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
+static void ionic_lif_queue_identify(struct ionic_lif *lif);
static void ionic_lif_deferred_work(struct work_struct *work)
{
@@ -186,10 +197,10 @@ static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
return 0;
}
-static void ionic_intr_free(struct ionic_lif *lif, int index)
+static void ionic_intr_free(struct ionic *ionic, int index)
{
- if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
- clear_bit(index, lif->ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
+ clear_bit(index, ionic->intrs);
}
static int ionic_qcq_enable(struct ionic_qcq *qcq)
@@ -299,7 +310,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif, qcq->intr.index);
+ ionic_intr_free(lif->ionic, qcq->intr.index);
}
devm_kfree(dev, qcq->cq.info);
@@ -345,7 +356,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
struct ionic_qcq *n_qcq)
{
if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
- ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
+ ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
n_qcq->flags &= ~IONIC_QCQ_F_INTR;
}
@@ -444,7 +455,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
- new->intr.index = INTR_INDEX_NOT_ASSIGNED;
+ new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
@@ -497,7 +508,7 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
if (flags & IONIC_QCQ_F_INTR)
- ionic_intr_free(lif, new->intr.index);
+ ionic_intr_free(lif->ionic, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -597,6 +608,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -614,6 +626,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -646,6 +660,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -663,6 +678,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -726,7 +743,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
}
break;
default:
- netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
comp->event.ecode, eid);
break;
}
@@ -775,8 +792,8 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
return max(n_work, a_work);
}
-static void ionic_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *ns)
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_lif_stats *ls;
@@ -1509,17 +1526,25 @@ static void ionic_txrx_free(struct ionic_lif *lif)
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
+ unsigned int sg_desc_sz;
unsigned int flags;
unsigned int i;
int err = 0;
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
lif->ntxq_descs,
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
- sizeof(struct ionic_txq_sg_desc),
+ sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i].qcq);
if (err)
goto err_out;
@@ -1682,7 +1707,7 @@ int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ if (!netif_device_present(netdev))
return 0;
ionic_stop_queues(lif);
@@ -1699,6 +1724,9 @@ static int ionic_get_vf_config(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1726,6 +1754,9 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
struct ionic_lif_stats *vs;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1761,6 +1792,9 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1792,6 +1826,9 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1818,6 +1855,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (tx_min)
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1840,6 +1880,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1862,6 +1905,9 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1898,6 +1944,9 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
return -EINVAL;
}
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -2065,9 +2114,17 @@ int ionic_lifs_alloc(struct ionic *ionic)
/* only build the first lif, others are for later features */
set_bit(0, ionic->lifbits);
+
lif = ionic_lif_alloc(ionic, 0);
+ if (IS_ERR_OR_NULL(lif)) {
+ clear_bit(0, ionic->lifbits);
+ return -ENOMEM;
+ }
- return PTR_ERR_OR_ZERO(lif);
+ lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
+ ionic_lif_queue_identify(lif);
+
+ return 0;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2101,6 +2158,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_txrx_free(lif);
}
ionic_lifs_deinit(ionic);
+ ionic_reset(ionic);
ionic_qcqs_free(lif);
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
@@ -2116,6 +2174,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Up: restarting LIFs\n");
+ ionic_init_devinfo(ionic);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out;
@@ -2289,6 +2348,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
@@ -2549,8 +2609,6 @@ int ionic_lifs_register(struct ionic *ionic)
dev_err(ionic->dev, "Cannot register net device, aborting\n");
return err;
}
-
- ionic_link_status_check_request(ionic->master_lif);
ionic->master_lif->registered = true;
return 0;
@@ -2573,6 +2631,80 @@ void ionic_lifs_unregister(struct ionic *ionic)
unregister_netdev(ionic->master_lif->netdev);
}
+static void ionic_lif_queue_identify(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ union ionic_q_identity *q_ident;
+ struct ionic_dev *idev;
+ int qtype;
+ int err;
+
+ idev = &lif->ionic->idev;
+ q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+
+ for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
+ struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
+
+ /* filter out the ones we know about */
+ switch (qtype) {
+ case IONIC_QTYPE_ADMINQ:
+ case IONIC_QTYPE_NOTIFYQ:
+ case IONIC_QTYPE_RXQ:
+ case IONIC_QTYPE_TXQ:
+ break;
+ default:
+ continue;
+ }
+
+ memset(qti, 0, sizeof(*qti));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
+ ionic_qtype_versions[qtype]);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ qti->version = q_ident->version;
+ qti->supported = q_ident->supported;
+ qti->features = le64_to_cpu(q_ident->features);
+ qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
+ qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
+ qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
+ qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
+ qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err == -EINVAL) {
+ dev_err(ionic->dev, "qtype %d not supported\n", qtype);
+ continue;
+ } else if (err == -EIO) {
+ dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
+ return;
+ } else if (err) {
+ dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
+ qtype, err);
+ return;
+ }
+
+ dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
+ qtype, qti->version);
+ dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
+ qtype, qti->supported);
+ dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
+ qtype, qti->features);
+ dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
+ qtype, qti->desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
+ qtype, qti->comp_sz);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
+ qtype, qti->sg_desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
+ qtype, qti->max_sg_elems);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
+ qtype, qti->sg_desc_stride);
+ }
+}
+
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lid)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 5d4ffda5c05f..c3428034a17b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -20,11 +20,13 @@ struct ionic_tx_stats {
u64 bytes;
u64 clean;
u64 linearize;
- u64 no_csum;
+ u64 csum_none;
u64 csum;
u64 crc32_csum;
u64 tso;
+ u64 tso_bytes;
u64 frags;
+ u64 vlan_inserted;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
};
@@ -38,6 +40,7 @@ struct ionic_rx_stats {
u64 csum_error;
u64 buffers_posted;
u64 dropped;
+ u64 vlan_stripped;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -114,11 +117,17 @@ struct ionic_lif_sw_stats {
u64 rx_packets;
u64 rx_bytes;
u64 tx_tso;
- u64 tx_no_csum;
+ u64 tx_tso_bytes;
+ u64 tx_csum_none;
u64 tx_csum;
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_error;
+ u64 hw_tx_dropped;
+ u64 hw_rx_dropped;
+ u64 hw_rx_over_errors;
+ u64 hw_rx_missed_errors;
+ u64 hw_tx_aborted_errors;
};
enum ionic_lif_state_flags {
@@ -133,6 +142,17 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_STATE_SIZE
};
+struct ionic_qtype_info {
+ u8 version;
+ u8 supported;
+ u64 features;
+ u16 desc_sz;
+ u16 comp_sz;
+ u16 sg_desc_sz;
+ u16 max_sg_elems;
+ u16 sg_desc_stride;
+};
+
#define IONIC_LIF_NAME_MAX_SZ 32
struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
@@ -161,11 +181,13 @@ struct ionic_lif {
bool mc_overflow;
unsigned int nmcast;
bool uc_overflow;
+ u16 lif_type;
unsigned int nucast;
struct ionic_lif_info *info;
dma_addr_t info_pa;
u32 info_sz;
+ struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
@@ -227,6 +249,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
}
void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 3ed150512091..92110abcff96 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -152,6 +152,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_RX_FILTER_ADD";
case IONIC_CMD_RX_FILTER_DEL:
return "IONIC_CMD_RX_FILTER_DEL";
+ case IONIC_CMD_Q_IDENTIFY:
+ return "IONIC_CMD_Q_IDENTIFY";
case IONIC_CMD_Q_INIT:
return "IONIC_CMD_Q_INIT";
case IONIC_CMD_Q_CONTROL:
@@ -356,7 +358,7 @@ try_again:
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(20);
+ msleep(5);
hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
@@ -413,6 +415,7 @@ int ionic_setup(struct ionic *ionic)
err = ionic_dev_setup(ionic);
if (err)
return err;
+ ionic_reset(ionic);
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 8f2a8fb029f1..2a1885da58a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -15,11 +15,109 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(rx_packets),
IONIC_LIF_STAT_DESC(rx_bytes),
IONIC_LIF_STAT_DESC(tx_tso),
- IONIC_LIF_STAT_DESC(tx_no_csum),
+ IONIC_LIF_STAT_DESC(tx_tso_bytes),
+ IONIC_LIF_STAT_DESC(tx_csum_none),
IONIC_LIF_STAT_DESC(tx_csum),
IONIC_LIF_STAT_DESC(rx_csum_none),
IONIC_LIF_STAT_DESC(rx_csum_complete),
IONIC_LIF_STAT_DESC(rx_csum_error),
+ IONIC_LIF_STAT_DESC(hw_tx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_over_errors),
+ IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
+ IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+};
+
+static const struct ionic_stat_desc ionic_port_stats_desc[] = {
+ IONIC_PORT_STAT_DESC(frames_rx_ok),
+ IONIC_PORT_STAT_DESC(frames_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_fcs),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_all),
+ IONIC_PORT_STAT_DESC(octets_rx_ok),
+ IONIC_PORT_STAT_DESC(octets_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_unicast),
+ IONIC_PORT_STAT_DESC(frames_rx_multicast),
+ IONIC_PORT_STAT_DESC(frames_rx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_rx_pause),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_length),
+ IONIC_PORT_STAT_DESC(frames_rx_undersized),
+ IONIC_PORT_STAT_DESC(frames_rx_oversized),
+ IONIC_PORT_STAT_DESC(frames_rx_fragments),
+ IONIC_PORT_STAT_DESC(frames_rx_jabber),
+ IONIC_PORT_STAT_DESC(frames_rx_pripause),
+ IONIC_PORT_STAT_DESC(frames_rx_stomped_crc),
+ IONIC_PORT_STAT_DESC(frames_rx_too_long),
+ IONIC_PORT_STAT_DESC(frames_rx_vlan_good),
+ IONIC_PORT_STAT_DESC(frames_rx_dropped),
+ IONIC_PORT_STAT_DESC(frames_rx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_rx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_rx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_rx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_rx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_rx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_rx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_rx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_rx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_rx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_ok),
+ IONIC_PORT_STAT_DESC(frames_tx_all),
+ IONIC_PORT_STAT_DESC(frames_tx_bad),
+ IONIC_PORT_STAT_DESC(octets_tx_ok),
+ IONIC_PORT_STAT_DESC(octets_tx_total),
+ IONIC_PORT_STAT_DESC(frames_tx_unicast),
+ IONIC_PORT_STAT_DESC(frames_tx_multicast),
+ IONIC_PORT_STAT_DESC(frames_tx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_tx_pause),
+ IONIC_PORT_STAT_DESC(frames_tx_pripause),
+ IONIC_PORT_STAT_DESC(frames_tx_vlan),
+ IONIC_PORT_STAT_DESC(frames_tx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_tx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_tx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_tx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_tx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_tx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_tx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_tx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_tx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_tx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_7),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_7),
+ IONIC_PORT_STAT_DESC(tx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pause_1us_count),
+ IONIC_PORT_STAT_DESC(frames_tx_truncated),
};
static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
@@ -29,6 +127,11 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(dma_map_err),
IONIC_TX_STAT_DESC(linearize),
IONIC_TX_STAT_DESC(frags),
+ IONIC_TX_STAT_DESC(tso),
+ IONIC_TX_STAT_DESC(tso_bytes),
+ IONIC_TX_STAT_DESC(csum_none),
+ IONIC_TX_STAT_DESC(csum),
+ IONIC_TX_STAT_DESC(vlan_inserted),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -40,6 +143,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
IONIC_RX_STAT_DESC(dropped),
+ IONIC_RX_STAT_DESC(vlan_stripped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
@@ -62,6 +166,7 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
+#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
@@ -76,6 +181,7 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
{
struct ionic_tx_stats *tstats;
struct ionic_rx_stats *rstats;
+ struct rtnl_link_stats64 ns;
struct ionic_qcq *txqcq;
struct ionic_qcq *rxqcq;
int q_num;
@@ -89,7 +195,8 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->tx_packets += tstats->pkts;
stats->tx_bytes += tstats->bytes;
stats->tx_tso += tstats->tso;
- stats->tx_no_csum += tstats->no_csum;
+ stats->tx_tso_bytes += tstats->tso_bytes;
+ stats->tx_csum_none += tstats->csum_none;
stats->tx_csum += tstats->csum;
}
@@ -103,6 +210,13 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->rx_csum_error += rstats->csum_error;
}
}
+
+ ionic_get_stats64(lif->netdev, &ns);
+ stats->hw_tx_dropped = ns.tx_dropped;
+ stats->hw_rx_dropped = ns.rx_dropped;
+ stats->hw_rx_over_errors = ns.rx_over_errors;
+ stats->hw_rx_missed_errors = ns.rx_missed_errors;
+ stats->hw_tx_aborted_errors = ns.tx_aborted_errors;
}
static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
@@ -118,6 +232,9 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
+ /* port stats */
+ total += IONIC_NUM_PORT_STATS;
+
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
@@ -144,6 +261,13 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name);
*buf += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ ionic_port_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s",
@@ -225,6 +349,7 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
+ struct ionic_port_stats *port_stats;
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
struct ionic_tx_stats *txstats;
@@ -238,6 +363,13 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
+ port_stats = &lif->ionic->idev.port_info->stats;
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ **buf = IONIC_READ_STAT_LE64(port_stats,
+ &ionic_port_stats_desc[i]);
+ (*buf)++;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txstats = &lif_to_txstats(lif, q_num);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index d2c1122a2c6e..3f543512616e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -11,6 +11,9 @@
.offset = IONIC_STAT_TO_OFFSET(type, stat_name) \
}
+#define IONIC_PORT_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_port_stats, stat_name)
+
#define IONIC_LIF_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name)
@@ -45,6 +48,9 @@ extern const int ionic_num_stats_grps;
#define IONIC_READ_STAT64(base_ptr, desc_ptr) \
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
+ __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
u64 offset;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d233b6e77b1e..b7f900c11834 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,8 +10,10 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
@@ -140,8 +142,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return skb;
}
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -210,10 +214,11 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
(comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(comp->vlan_tci));
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(comp->vlan_tci));
+ stats->vlan_stripped++;
}
if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
@@ -475,7 +480,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
return work_done;
}
-static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->lif->ionic->dev;
@@ -491,7 +497,8 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t
return dma_addr;
}
-static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
size_t offset, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -507,8 +514,10 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *fra
return dma_addr;
}
-static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
struct ionic_txq_sg_elem *elem = sg_desc->elems;
@@ -852,6 +861,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts += total_pkts;
stats->bytes += total_bytes;
stats->tso++;
+ stats->tso_bytes += total_bytes;
return 0;
@@ -890,9 +900,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
if (skb->csum_not_inet)
stats->crc32_csum++;
@@ -927,9 +940,12 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
- stats->no_csum++;
+ stats->csum_none++;
return 0;
}
@@ -989,6 +1005,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
+ int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int err;
@@ -997,7 +1014,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
return (skb->len / skb_shinfo(skb)->gso_size) + 1;
/* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
+ if (skb_shinfo(skb)->nr_frags <= sg_elems)
return 1;
/* Too many frags, so linearize */
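The change above sizes the non-TSO SG check from the per-queue qtype_info instead of the fixed IONIC_TX_MAX_SG_ELEMS. The shape of the estimate, as a hedged user-space sketch (fake_skb is a hypothetical stand-in for the sk_buff fields involved):

#include <stdio.h>

struct fake_skb {		/* hypothetical stand-in for sk_buff fields */
	unsigned int len;
	unsigned int gso_size;	/* 0 when the skb is not TSO */
	unsigned int nr_frags;
};

/* Returns a descriptor estimate, or -1 when the skb must be linearized */
static int tx_descs_needed(const struct fake_skb *skb, unsigned int max_sg_elems)
{
	if (skb->gso_size)	/* TSO: roughly one descriptor per MSS segment */
		return (int)(skb->len / skb->gso_size) + 1;

	if (skb->nr_frags <= max_sg_elems)	/* non-TSO: 1 desc + SG list */
		return 1;

	return -1;
}

int main(void)
{
	struct fake_skb tso = { .len = 64000, .gso_size = 1448, .nr_frags = 5 };
	struct fake_skb big = { .len = 9000, .gso_size = 0, .nr_frags = 20 };

	printf("tso: %d descs\n", tx_descs_needed(&tso, 8));
	printf("big: %d (linearize)\n", tx_descs_needed(&big, 8));
	return 0;
}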
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fa41bf08a589..66ed39d6f357 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -740,12 +740,6 @@ struct qed_dbg_feature {
u32 dumped_dwords;
};
-struct qed_dbg_params {
- struct qed_dbg_feature features[DBG_FEATURE_NUM];
- u8 engine_for_debug;
- bool print_data;
-};
-
struct qed_dev {
u32 dp_module;
u8 dp_level;
@@ -844,6 +838,9 @@ struct qed_dev {
/* Recovery */
bool recov_in_prog;
+ /* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
/* LLH info */
u8 ppfid_bitmap;
struct qed_llh_info *p_llh_info;
@@ -872,17 +869,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
- struct qed_dbg_params dbg_params;
-
#ifdef CONFIG_QED_LL2
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
+ bool print_dbg_data;
+
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
@@ -1020,6 +1018,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f4eebaabb6d0..57a0dab88431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7453,7 +7453,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 text_size_bytes, null_char_pos, i;
enum dbg_status rc;
char *text_buf;
@@ -7502,7 +7502,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
text_buf[i] = '\n';
/* Dump printable feature to log */
- if (p_hwfn->cdev->dbg_params.print_data)
+ if (p_hwfn->cdev->print_dbg_data)
qed_dbg_print_feature(text_buf, text_size_bytes);
/* Free the old dump_buf and point the dump_buf to the newly allocated
@@ -7523,7 +7523,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -7648,7 +7648,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 len_rounded, i;
__be32 val;
int rc;
@@ -7780,7 +7780,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
int grc_params[MAX_DBG_GRC_PARAMS], i;
u32 offset = 0, feature_size;
@@ -8000,7 +8000,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
@@ -8059,9 +8059,9 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
+ &cdev->dbg_features[feature];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8084,7 +8084,7 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
DP_VERBOSE(cdev, QED_MSG_DEBUG,
"copying debugfs feature to external buffer\n");
memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
- *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
4;
out:
@@ -8095,7 +8095,7 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
@@ -8120,14 +8120,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
- return cdev->dbg_params.engine_for_debug;
+ return cdev->engine_for_debug;
}
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
engine_number);
- cdev->dbg_params.engine_for_debug = engine_number;
+ cdev->engine_for_debug = engine_number;
}
void qed_dbg_pf_init(struct qed_dev *cdev)
@@ -8146,7 +8146,7 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
}
/* Set the hwfn to be 0 as default */
- cdev->dbg_params.engine_for_debug = 0;
+ cdev->engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 7119a18af19e..6e857468e993 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3085,7 +3085,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->rel_pf_id, false);
if (rc) {
- DP_NOTICE(p_hwfn, "Final cleanup failed\n");
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
+ QED_HW_ERR_RAMROD_FAIL,
+ "Final cleanup failed\n");
goto load_err;
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 4597015b8bff..f00460d00cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12400,6 +12400,13 @@ struct load_rsp_stc {
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -12488,10 +12495,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+/* Send mdump (crash dump) commands; the sub-command opcode is in param[3:0] */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
+#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
+
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
@@ -12517,6 +12528,21 @@ struct public_drv_mb {
#define RESOURCE_DUMP 0
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
+#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
+
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -12626,6 +12652,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xFF
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
@@ -12678,6 +12715,14 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
+#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
+
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+
u32 fw_mb_param;
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12742,9 +12787,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_GET_FCOE_STATS,
MFW_DRV_MSG_GET_ISCSI_STATS,
MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_BW_UPDATE10,
+ MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_BW_UPDATE11,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_OEM_CFG_UPDATE,
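As the defines above note, DRV_MSG_CODE_MDUMP_CMD carries its sub-command in param[3:0], per MDUMP_DRV_PARAM_OPCODE_MASK. A tiny sketch of that bit layout; note the driver passes cmd and param through separate mailbox fields (mb_params.cmd / mb_params.param), so this only demonstrates the masking:

#include <stdint.h>
#include <stdio.h>

#define DRV_MSG_CODE_MDUMP_CMD        0x00250000u
#define MDUMP_DRV_PARAM_OPCODE_MASK   0x0000000fu
#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07u

int main(void)
{
	uint32_t cmd = DRV_MSG_CODE_MDUMP_CMD;	/* mailbox message code */
	uint32_t param = DRV_MSG_CODE_MDUMP_GET_RETAIN &
			 MDUMP_DRV_PARAM_OPCODE_MASK;	/* opcode in param[3:0] */

	printf("mailbox cmd 0x%08x, param 0x%08x\n", cmd, param);
	return 0;
}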
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 4ab8cfaf63d1..5fa251489536 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -762,9 +762,10 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
dst_type,
length_cur);
if (qed_status) {
- DP_NOTICE(p_hwfn,
- "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status, src_addr, dst_addr, length_cur);
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status, src_addr,
+ dst_addr, length_cur);
break;
}
}
@@ -837,6 +838,41 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...)
+{
+ char buf[QED_HW_ERR_MAX_STR_SIZE];
+ va_list vl;
+ int len;
+
+ if (fmt) {
+ va_start(vl, fmt);
+ len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
+ len = QED_HW_ERR_MAX_STR_SIZE - 1;
+
+ DP_NOTICE(p_hwfn, "%s", buf);
+ }
+
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->cdev->recov_in_prog &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_DRV,
+ "Recovery is in progress. Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ qed_hw_error_occurred(p_hwfn, err_type);
+
+ if (fmt)
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
+}
+
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
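qed_hw_err_notify() above depends on vsnprintf() returning the length the formatted string would have had, and clamps it so the size handed to qed_mcp_send_raw_debug_data() never exceeds the buffer. A user-space sketch of the same clamp, assuming a fixed 256-byte buffer:

#include <stdarg.h>
#include <stdio.h>

#define MAX_STR_SIZE 256

/* Format a message into buf; return the length actually stored */
static int format_err(char buf[MAX_STR_SIZE], const char *fmt, ...)
{
	va_list vl;
	int len;

	va_start(vl, fmt);
	len = vsnprintf(buf, MAX_STR_SIZE, fmt, vl);
	va_end(vl);

	/* vsnprintf reports the untruncated length; clamp to what fits */
	if (len > MAX_STR_SIZE - 1)
		len = MAX_STR_SIZE - 1;

	return len;
}

int main(void)
{
	char buf[MAX_STR_SIZE];
	int n = format_err(buf, "error 0x%x on engine %d\n", 0x1234, 1);

	printf("%d bytes: %s", n, buf);
	return 0;
}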
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 505e94db939d..f5b109b04b66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -315,4 +315,19 @@ int qed_init_fw_data(struct qed_dev *cdev,
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase);
+#define QED_HW_ERR_MAX_STR_SIZE 256
+
+/**
+ * @brief qed_hw_err_notify - Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param err_type
+ * @param fmt - format string; the formatted message is also sent to the MFW as debug data
+ * @param ... - buffer format args
+ */
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9f5113639eaf..b7b974f0ef21 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -96,6 +96,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT BIT(23)
+#define ATTENTION_CLEAR_ENABLE BIT(28)
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -363,6 +364,21 @@ static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}
+static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
+{
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
+ "FW assertion!\n");
+
+ return -EINVAL;
+}
+
+static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return 0;
+}
+
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
@@ -605,13 +621,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 4 */
- {"General Attention 32", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 32", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
+ MAX_BLOCK_ID},
{"General Attention %d",
(2 << ATTENTION_LENGTH_SHIFT) |
(33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
- {"General Attention 35", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
+ MAX_BLOCK_ID},
{"NWS Parity",
ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
@@ -927,9 +945,12 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
qed_int_attn_print(p_hwfn, p_aeu->block_index,
ATTN_TYPE_INTERRUPT, !b_fatal);
-
- /* If the attention is benign, no need to prevent it */
- if (!rc)
+ /* Raise a HW error notification if the attention is fatal */
+ if (b_fatal)
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
+ "`%s': Fatal attention\n",
+ p_bit_name);
+ else /* If the attention is benign, no need to prevent it */
goto out;
/* Prevent this Attention from being asserted in the future */
@@ -2349,6 +2370,11 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
cdev->hwfns[i].b_int_requested = false;
}
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
+{
+ cdev->attn_clr_en = clr_enable;
+}
+
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 9ad568d93ae6..e09db3386367 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -191,6 +191,17 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
+ * @brief qed_int_attn_clr_enable - Sets whether the general behavior
+ *        prevents attentions from being reasserted, or follows the
+ *        attributes of the specific attention.
+ *
+ * @param cdev
+ * @param clr_enable
+ *
+ */
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
+
+/**
* @brief - Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 38a1d26ca9db..83e798d4eebb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -2468,6 +2468,39 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
ops->schedule_recovery_handler(cookie);
}
+char *qed_hw_err_type_descr[] = {
+ [QED_HW_ERR_FAN_FAIL] = "Fan Failure",
+ [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
+ [QED_HW_ERR_HW_ATTN] = "HW Attention",
+ [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
+ [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
+ [QED_HW_ERR_FW_ASSERT] = "FW Assertion",
+ [QED_HW_ERR_LAST] = "Unknown",
+};
+
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ char *err_str;
+
+ if (err_type > QED_HW_ERR_LAST)
+ err_type = QED_HW_ERR_LAST;
+ err_str = qed_hw_err_type_descr[err_type];
+
+ DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
+
+ /* Call the HW error handler of the protocol driver.
+ * If it is not available, fall back to a minimal handling that
+ * prevents HW attentions from being reasserted.
+ */
+ if (ops && ops->schedule_hw_err_handler)
+ ops->schedule_hw_err_handler(cookie, err_type);
+ else
+ qed_int_attn_clr_enable(p_hwfn->cdev, true);
+}
+
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -2689,6 +2722,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.set_led = &qed_set_led,
.recovery_process = &qed_recovery_process,
.recovery_prolog = &qed_recovery_prolog,
+ .attn_clr_enable = &qed_int_attn_clr_enable,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 280527cc0578..9624616806e7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -575,6 +575,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
qed_mcp_cmd_set_blocking(p_hwfn, true);
+ qed_hw_err_notify(p_hwfn, p_ptt,
+ QED_HW_ERR_MFW_RESP_FAIL, NULL);
return -EAGAIN;
}
@@ -1704,6 +1706,127 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param);
}
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ /* A single notification should be sent to the upper driver in CMT mode */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+ "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
+struct qed_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = -EOPNOTSUPP;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+ int rc;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = p_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+ rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct mdump_retain_data_stc mdump_retain;
+ int rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == 0 && mdump_retain.valid)
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch,
+ mdump_retain.pf, mdump_retain.status);
+ else
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device\n");
+
+ DP_NOTICE(p_hwfn,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
+}
+
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct public_func shmem_info;
@@ -1850,6 +1973,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ qed_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_TLV_REQ:
qed_mfw_tlv_req(p_hwfn);
break;
@@ -3819,3 +3948,127 @@ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
DRV_MSG_CODE_SET_NVM_CFG_OPTION,
mb_param, &resp, &param, len, (u32 *)p_buf);
}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+ DP_ERR(p_hwfn,
+ "Debug data size is %d while it should not exceed %d\n",
+ size, QED_MCP_DBG_DATA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+ mb_params.p_data_src = p_buf;
+ mb_params.data_src_size = size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+ DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+ return -EBUSY;
+ } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send debug data to the MFW [resp 0x%08x]\n",
+ mb_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+ QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
+
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+ u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+ u32 tmp_size = size, *p_header, *p_payload;
+ u8 flags = 0;
+ u16 seq;
+ int rc;
+
+ p_header = (u32 *)raw_data;
+ p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+ seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+ /* First chunk is marked as 'first' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+ *p_header = 0;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+ while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+ memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+ rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ QED_MCP_DBG_DATA_MAX_SIZE);
+ if (rc)
+ return rc;
+
+ /* Clear the 'first' marking after sending the first chunk */
+ if (p_tmp_buf == p_buf) {
+ flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+ flags);
+ }
+
+ p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ }
+
+ /* Last chunk is marked as 'last' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ memcpy(p_payload, p_tmp_buf, tmp_size);
+
+ /* Casting the remaining size to u8 is ok since at this point it is <= 32 */
+ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+ tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+ return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+ QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
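The new debug-data path chunks the buffer into MCP_DRV_NVM_BUF_LEN-sized messages, each prefixed with a 32-bit header ([31:28] PF, [27:20] flags, [19:12] type, [11:0] sequence) whose FIRST/LAST flags let the MFW reassemble the stream. A standalone sketch of the packing and chunk walk (sizes and names illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE   32u			/* header + payload */
#define HDR_SIZE     ((uint32_t)sizeof(uint32_t))
#define PAYLOAD_SIZE (CHUNK_SIZE - HDR_SIZE)

#define FLAG_FIRST 0x1u
#define FLAG_LAST  0x2u

/* [31:28] pf, [27:20] flags, [19:12] type, [11:0] sequence number */
static uint32_t mk_hdr(uint8_t pf, uint8_t flags, uint8_t type, uint16_t seq)
{
	return ((uint32_t)(pf & 0xfu) << 28) |
	       ((uint32_t)flags << 20) |
	       ((uint32_t)type << 12) |
	       (seq & 0xfffu);
}

static void send_debug_data(const uint8_t *buf, uint32_t size,
			    uint8_t pf, uint8_t type, uint16_t seq)
{
	uint8_t chunk[CHUNK_SIZE];
	uint8_t flags = FLAG_FIRST;	/* first chunk carries the FIRST flag */
	uint32_t hdr;

	while (size > PAYLOAD_SIZE) {
		hdr = mk_hdr(pf, flags, type, seq);
		memcpy(chunk, &hdr, HDR_SIZE);	/* native endianness; a sketch */
		memcpy(chunk + HDR_SIZE, buf, PAYLOAD_SIZE);
		printf("send %u bytes, hdr 0x%08x\n", CHUNK_SIZE, hdr);
		buf += PAYLOAD_SIZE;
		size -= PAYLOAD_SIZE;
		flags = 0;		/* clear FIRST after the first send */
	}

	flags |= FLAG_LAST;		/* final chunk carries the LAST flag */
	hdr = mk_hdr(pf, flags, type, seq);
	memcpy(chunk, &hdr, HDR_SIZE);
	memcpy(chunk + HDR_SIZE, buf, size);
	printf("send %u bytes, hdr 0x%08x\n", HDR_SIZE + size, hdr);
}

int main(void)
{
	uint8_t data[70] = { 0 };

	send_debug_data(data, sizeof(data), 2, 0 /* RAW */, 1);
	return 0;
}

A buffer that fits a single chunk gets both FIRST and LAST set, as in the driver, since the loop body never runs.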
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 9c4c2763de8d..5750b4c5ef63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -685,6 +685,18 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Send raw debug data to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_buf - raw debug data buffer
+ * @param size - buffer size
+ */
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -731,6 +743,9 @@ struct qed_mcp_info {
/* Capabilities negotiated with the MFW */
u32 capabilities;
+
+ /* S/N for debug data mailbox commands */
+ atomic_t dbg_data_seq;
};
struct qed_mcp_mb_params {
@@ -1001,6 +1016,19 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
+/**
+ * @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @return 0 upon success.
+ */
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain);
+
/**
* @brief - Sets the MFW's max value for the given resource
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f5f3c03b9dd2..790c28d696a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -160,12 +160,16 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
return 0;
}
err:
- DP_NOTICE(p_hwfn,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
- le32_to_cpu(p_ent->elem.hdr.cid),
- p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- le16_to_cpu(p_ent->elem.hdr.echo));
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ qed_ptt_release(p_hwfn, p_ptt);
return -EBUSY;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 368e88565783..aabeaf03135e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -32,6 +32,7 @@
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include "qed_vf.h"
@@ -40,9 +41,12 @@
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev) ((cdev)->b_is_vf)
-#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev) (is_kdump_kernel() ? \
+ (0) : ((cdev)->b_is_vf))
+#define IS_PF(cdev) (is_kdump_kernel() ? \
+ (1) : !((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (is_kdump_kernel() ? \
+ (0) : !!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
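With the overrides above, a kdump kernel always takes the PF paths regardless of how the function was probed. The gating pattern, sketched in user space with is_kdump_kernel() stubbed out (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Stub: the kernel helper checks whether this is a crash (kdump) kernel */
static bool is_kdump_kernel(void) { return false; }

struct dev_state {
	bool b_is_vf;
};

/* Mirrors the patched IS_VF(): a kdump kernel always behaves as a PF */
static bool dev_is_vf(const struct dev_state *d)
{
	return is_kdump_kernel() ? false : d->b_is_vf;
}

int main(void)
{
	struct dev_state d = { .b_is_vf = true };

	printf("is_vf=%d\n", dev_is_vf(&d));
	return 0;
}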
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 1a708f95ce94..8857da1208d7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -278,6 +278,14 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
+
+ unsigned long err_flags;
+#define QEDE_ERR_IS_HANDLED 31
+#define QEDE_ERR_ATTN_CLR_EN 0
+#define QEDE_ERR_GET_DBG_INFO 1
+#define QEDE_ERR_IS_RECOVERABLE 2
+#define QEDE_ERR_WARN 3
+
struct qede_dump_info dump_info;
};
@@ -485,12 +493,15 @@ struct qede_fastpath {
#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RSVD1 2
+#define QEDE_SP_RSVD2 3
+#define QEDE_SP_HW_ERR 4
+#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-#define QEDE_SP_ARFS_CONFIG 4
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif
@@ -522,7 +533,6 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len);
@@ -575,12 +585,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 812c7766e096..24cc68391ac4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -190,12 +190,14 @@ static const struct {
enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
+ QEDE_PRI_FLAG_RECOVER_ON_ERROR,
QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
+ "Recover on error",
};
enum qede_ethtool_tests {
@@ -417,9 +419,30 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->dev_info.common.smart_an)
flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+ if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
+ flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+
return flags;
}
+static int qede_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 cflags = qede_get_priv_flags(dev);
+ u32 dflags = flags ^ cflags;
+
+ /* can only change RECOVER_ON_ERROR flag */
+ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ return -EINVAL;
+
+ if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+ else
+ clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+
+ return 0;
+}
+
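qede_set_priv_flags() XORs the requested flags against the current ones and rejects the call if any bit other than RECOVER_ON_ERROR changed. The same validation as a standalone sketch (flag values illustrative):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the driver derives them from its enum */
#define FLAG_CMT              (1u << 0)
#define FLAG_SMART_AN         (1u << 1)
#define FLAG_RECOVER_ON_ERROR (1u << 2)

/* Accept a request only if nothing but the writable flag changed */
static int set_priv_flags(uint32_t current_flags, uint32_t requested)
{
	uint32_t changed = requested ^ current_flags;

	if (changed & ~FLAG_RECOVER_ON_ERROR)
		return -1;	/* the driver returns -EINVAL here */

	/* ...apply the RECOVER_ON_ERROR bit from 'requested'... */
	return 0;
}

int main(void)
{
	printf("%d\n", set_priv_flags(FLAG_CMT, FLAG_CMT | FLAG_RECOVER_ON_ERROR));
	printf("%d\n", set_priv_flags(FLAG_CMT, FLAG_SMART_AN));
	return 0;
}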
struct qede_link_mode_mapping {
u32 qed_link_mode;
u32 ethtool_link_mode;
@@ -2098,6 +2121,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
.get_priv_flags = qede_get_priv_flags,
+ .set_priv_flags = qede_set_priv_flags,
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9b456198cb50..f50d9a9b76be 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
@@ -138,10 +139,12 @@ static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -229,6 +232,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
#endif
.link_update = qede_link_update,
.schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -535,6 +539,51 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ DP_NOTICE(edev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl),
+ jiffies);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_tx_queue *txq;
+ int cos;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
+ return;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &edev->fp_array[txqueue].txq[cos];
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, txq);
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
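The timeout handler above, like qede_schedule_hw_err_handler() further down, wins the right to run via an atomic test-and-set of QEDE_ERR_IS_HANDLED; the sleepable handler clears the bit once recovery is scheduled. A user-space sketch of that guard using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag err_is_handled = ATOMIC_FLAG_INIT;

/* Returns 1 when this caller won the right to handle the error */
static int try_start_error_handling(void)
{
	/* like test_and_set_bit(): a previously-set flag means another
	 * flow is already handling an error
	 */
	return !atomic_flag_test_and_set(&err_is_handled);
}

static void finish_error_handling(void)
{
	atomic_flag_clear(&err_is_handled);	/* like clear_bit() */
}

int main(void)
{
	printf("%d\n", try_start_error_handling());	/* 1: we handle it */
	printf("%d\n", try_start_error_handling());	/* 0: already in progress */
	finish_error_handling();
	printf("%d\n", try_start_error_handling());	/* 1 again */
	return 0;
}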
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -622,6 +671,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
.ndo_do_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
@@ -715,8 +765,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->dp_module = dp_module;
edev->dp_level = dp_level;
edev->ops = qed_ops;
- edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
- edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
info->num_queues, info->num_queues);
@@ -1002,6 +1058,8 @@ static void qede_sp_task(struct work_struct *work)
qede_process_arfs_filters(edev, false);
}
#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
__qede_unlock(edev);
if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
@@ -1207,7 +1265,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
- is_vf = true;
+ is_vf = !is_kdump_kernel();
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
@@ -1714,7 +1772,7 @@ static void qede_init_fp(struct qede_dev *edev)
txq->ndev_txq_id = ndev_tx_id;
if (edev->dev_info.is_legacy)
- txq->is_legacy = 1;
+ txq->is_legacy = true;
txq->dev = &edev->pdev->dev;
}
@@ -2502,6 +2560,100 @@ err:
qede_recovery_failed(edev);
}
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Trigger a recovery process.
+ * It is placed in the sleep-requiring section to ensure it runs
+ * last, after all the other operations have completed.
+ */
+ if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
+ edev->ops->common->recovery_process(cdev);
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
+}
+
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f7c2f32237cb..7adbb03cb931 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1582,10 +1582,10 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
!adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = 1;
+ adapter->drv_mac_learn = true;
adapter->rx_mac_learn = true;
} else {
- adapter->drv_mac_learn = 0;
+ adapter->drv_mac_learn = false;
adapter->rx_mac_learn = false;
}
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 251d4ac4af02..117188e3c7de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1431,8 +1431,9 @@ error:
}
/* Transmit the packet using specified transmit queue */
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb)
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb)
{
struct emac_tpd tpd;
u32 prod_idx;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index ae08bdd9046c..920123eb8ace 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -227,8 +227,9 @@ void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
int *num_pkts, int max_pkts);
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb);
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb);
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 18b0c7a2d6dc..20b1b43a0e39 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -115,7 +115,8 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
}
/* Transmit the packet */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
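The emac hunks are a pure prototype fix: .ndo_start_xmit implementations must return netdev_tx_t rather than int. The expected shape, sketched with user-space stand-ins for the kernel's return values:

#include <stdio.h>

/* User-space stand-ins for the kernel's netdev_tx_t values */
typedef enum { NETDEV_TX_OK = 0x00, NETDEV_TX_BUSY = 0x10 } netdev_tx_t;

/* A transmit hook reports ring state, not an errno-style int */
static netdev_tx_t start_xmit(int hw_ring_full)
{
	if (hw_ring_full)
		return NETDEV_TX_BUSY;	/* stack will requeue the skb */
	return NETDEV_TX_OK;
}

int main(void)
{
	printf("ok=%d busy=%d\n", start_xmit(0), start_xmit(1));
	return 0;
}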
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 8b665f2ec21f..d926583b407f 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -19,7 +18,6 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
-#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
@@ -725,55 +723,35 @@ struct rtl_cond {
const char *msg;
};
-static void rtl_udelay(unsigned int d)
-{
- udelay(d);
-}
-
static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
- void (*delay)(unsigned int), unsigned int d, int n,
- bool high)
+ unsigned long usecs, int n, bool high)
{
int i;
for (i = 0; i < n; i++) {
if (c->check(tp) == high)
return true;
- delay(d);
+ fsleep(usecs);
}
if (net_ratelimit())
- netdev_err(tp->dev, "%s == %d (loop: %d, delay: %d).\n",
- c->msg, !high, n, d);
+ netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
+ c->msg, !high, n, usecs);
return false;
}
-static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
+ return rtl_loop_wait(tp, c, d, n, true);
}
-static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
-}
-
-static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, msleep, d, n, true);
-}
-
-static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, msleep, d, n, false);
+ return rtl_loop_wait(tp, c, d, n, false);
}
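The rework above folds the udelay- and msleep-based wait variants into a single microsecond-based rtl_loop_wait() built on fsleep(), which chooses an appropriate delay primitive for the requested duration. The poll-with-timeout shape, as a user-space sketch with usleep() standing in for fsleep():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll cond() up to n times, sleeping usecs between attempts */
static bool loop_wait(bool (*cond)(void), unsigned long usecs, int n,
		      bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		if (cond() == high)
			return true;
		usleep(usecs);	/* the kernel helper calls fsleep() */
	}
	return false;
}

static int calls;
static bool ready_after_three(void) { return ++calls >= 3; }

int main(void)
{
	printf("%s\n", loop_wait(ready_after_three, 100, 10, true) ?
	       "ready" : "timed out");
	return 0;
}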
#define DECLARE_RTL_COND(name) \
@@ -808,7 +786,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
- rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+ rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -818,7 +796,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
RTL_W32(tp, GPHY_OCP, reg << 15);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}
@@ -896,7 +874,7 @@ static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
- rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
+ rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
@@ -910,7 +888,7 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
- value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
/*
@@ -933,7 +911,7 @@ static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -950,7 +928,7 @@ static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
}
@@ -1036,7 +1014,7 @@ static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
udelay(10);
}
@@ -1045,7 +1023,7 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
@@ -1061,7 +1039,7 @@ static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
RTL_W32(tp, ERIDR, val);
RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
- rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
@@ -1074,7 +1052,7 @@ static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
- return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
RTL_R32(tp, ERIDR) : ~0;
}
@@ -1107,7 +1085,7 @@ static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
RTL_R32(tp, OCPDR) : ~0;
}
@@ -1121,7 +1099,7 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
{
RTL_W32(tp, OCPDR, data);
RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
@@ -1169,7 +1147,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
@@ -1177,7 +1155,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
@@ -1185,7 +1163,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30,
r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1208,7 +1186,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1217,7 +1195,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30,
r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -1278,7 +1256,7 @@ u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
@@ -1615,7 +1593,7 @@ static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
- rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+ rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
static void rtl8169_reset_counters(struct rtl8169_private *tp)
@@ -1940,12 +1918,6 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- if (dev->phydev->autoneg == AUTONEG_DISABLE ||
- dev->phydev->duplex != DUPLEX_FULL) {
- ret = -EPROTONOSUPPORT;
- goto out;
- }
-
ret = phy_ethtool_set_eee(tp->phydev, data);
if (!ret)
@@ -2472,7 +2444,7 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
{
RTL_W8(tp, ChipCmd, CmdReset);
- rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_firmware(struct rtl8169_private *tp)
@@ -2515,6 +2487,33 @@ DECLARE_RTL_COND(rtl_txcfg_empty_cond)
return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+{
+ return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+
+static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+ break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
+ fsleep(2000);
+ rtl_wait_txrx_fifo_empty(tp);
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
/* Disable interrupts */
@@ -2526,12 +2525,15 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
+ rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
break;
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
- rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+ rtl_enable_rxdvgate(tp);
+ fsleep(2000);
break;
default:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
@@ -2610,7 +2612,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
}
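For reference, the multicast hash above keeps only the top 6 bits of the 32-bit CRC (the >> 26), yielding an index 0..63 into the 64-bit hardware filter: bit_nr >> 5 selects one of the two 32-bit mc_filter words and bit_nr & 31 the bit within it. As an illustrative example, a CRC of 0xF8000000 gives bit_nr = 62, i.e. word 1, bit 30.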
@@ -2641,7 +2643,7 @@ static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE | func << 16);
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
@@ -2651,7 +2653,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
CSIAR_BYTE_ENABLE);
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(tp, CSIDR) : ~0;
}
@@ -3606,7 +3608,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe098, 0xc302);
- rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
+ rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
rtl8125_config_eee_mac(tp);
@@ -5076,9 +5078,9 @@ DECLARE_RTL_COND(rtl_link_list_ready_cond)
return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}
-DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
{
- return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+ rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
@@ -5147,49 +5149,34 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
- return;
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_init_8125(struct rtl8169_private *tp)
{
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8ed73f44405d..f45331ed90b0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2472,7 +2472,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
}
/* Packet transmit function */
-static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
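The prototype change above (and the matching sni_ave change further below) exists because the networking core expects .ndo_start_xmit to return netdev_tx_t rather than a plain int. A minimal sketch of the return convention, with a hypothetical ring-space helper (illustrative, not sh_eth code):

	/* NETDEV_TX_OK means the skb was consumed by the driver;
	 * NETDEV_TX_BUSY asks the core to requeue the skb, so the driver
	 * must not have freed it. Assumes <linux/netdevice.h>.
	 */
	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
	{
		if (!example_tx_ring_has_room(ndev)) { /* hypothetical helper */
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;
		}
		/* ... map and queue the skb to hardware ... */
		return NETDEV_TX_OK;
	}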
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 3f16bd807c6e..e634e8110585 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -553,7 +553,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
@@ -1281,13 +1281,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_check_datapath_caps = false;
}
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
/* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
nic_data->n_allocated_vis);
if (rc)
return rc;
- nic_data->must_realloc_vis = false;
+ efx->must_realloc_vis = false;
}
if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
@@ -1326,16 +1326,15 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
#endif
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_rss_contexts = true;
- nic_data->must_restore_filters = true;
+ efx->must_realloc_vis = true;
+ efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
if (nic_data->vf)
for (i = 0; i < efx->vf_count; i++)
@@ -2389,6 +2388,86 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
+static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int enabled, implemented;
+ bool want_workaround_26807;
+ int rc;
+
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before this workaround,
+ * thus it must be unavailable in this firmware.
+ */
+ nic_data->workaround_26807 = false;
+ return 0;
+ }
+ if (rc)
+ return rc;
+ want_workaround_26807 =
+ implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (want_workaround_26807 && !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+
+ /* With MCFW v4.6.x and earlier, the
+ * boot count will have incremented,
+ * so re-read the warm_boot_count
+ * value now to ensure this function
+ * doesn't think it has changed next
+ * time it checks.
+ */
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0) {
+ nic_data->warm_boot_count = rc;
+ rc = 0;
+ }
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_ef10_probe_multicast_chaining(efx);
+ struct efx_mcdi_filter_vlan *vlan;
+
+ if (rc)
+ return rc;
+ rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
+
+ if (rc)
+ return rc;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_table_remove(efx);
+ return rc;
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -2464,75 +2543,14 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- unsigned int enabled, implemented;
bool use_v2, cut_thru;
- int rc;
nic_data = efx->nic_data;
use_v2 = nic_data->datapath_caps2 &
1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
cut_thru = !(nic_data->datapath_caps &
1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
- rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
-
- /* IRQ return is ignored */
- if (channel->channel || rc)
- return rc;
-
- /* Successfully created event queue on channel 0 */
- rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
- if (rc == -ENOSYS) {
- /* GET_WORKAROUNDS was implemented before this workaround,
- * thus it must be unavailable in this firmware.
- */
- nic_data->workaround_26807 = false;
- rc = 0;
- } else if (rc) {
- goto fail;
- } else {
- nic_data->workaround_26807 =
- !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
-
- if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
- !nic_data->workaround_26807) {
- unsigned int flags;
-
- rc = efx_mcdi_set_workaround(efx,
- MC_CMD_WORKAROUND_BUG26807,
- true, &flags);
-
- if (!rc) {
- if (flags &
- 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
- netif_info(efx, drv, efx->net_dev,
- "other functions on NIC have been reset\n");
-
- /* With MCFW v4.6.x and earlier, the
- * boot count will have incremented,
- * so re-read the warm_boot_count
- * value now to ensure this function
- * doesn't think it has changed next
- * time it checks.
- */
- rc = efx_ef10_get_warm_boot_count(efx);
- if (rc >= 0) {
- nic_data->warm_boot_count = rc;
- rc = 0;
- }
- }
- nic_data->workaround_26807 = true;
- } else if (rc == -EPERM) {
- rc = 0;
- }
- }
- }
-
- if (!rc)
- return 0;
-
-fail:
- efx_mcdi_ev_fini(channel);
- return rc;
+ return efx_mcdi_ev_init(channel, cut_thru, use_v2);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
@@ -3100,16 +3118,15 @@ void efx_ef10_handle_drain_event(struct efx_nic *efx)
static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
int pending;
/* If the MC has just rebooted, the TX/RX queues will have already been
* torn down, but efx->active_queues needs to be set to zero.
*/
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
atomic_set(&efx->active_queues, 0);
return 0;
}
@@ -3158,22 +3175,22 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
- rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
goto restore_filters;
ether_addr_copy(mac_old, nic_data->vport_mac);
- rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
nic_data->vport_mac);
if (rc)
goto restore_vadaptor;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
if (!rc) {
ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
} else {
- rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
if (rc2) {
/* Failed to add original MAC, so clear vport_mac */
eth_zero_addr(nic_data->vport_mac);
@@ -3182,12 +3199,12 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
}
restore_vadaptor:
- rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc2)
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_mcdi_filter_table_probe(efx);
+ rc2 = efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3225,11 +3242,11 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_mcdi_filter_table_probe(efx);
+ efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -3961,6 +3978,35 @@ out_unlock:
return rc;
}
+/* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
+ size_t len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return scnprintf(buf, len, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
+}
+
+static unsigned int ef10_check_caps(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset)
+{
+ const struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ switch (offset) {
+	case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST:
+		return nic_data->datapath_caps & BIT_ULL(flag);
+	case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST:
+		return nic_data->datapath_caps2 & BIT_ULL(flag);
+ default:
+ return 0;
+ }
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4027,7 +4073,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4073,6 +4119,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4139,7 +4187,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4208,4 +4256,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 4580b30caae1..21fa6c0e8873 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,15 +232,14 @@ fail:
static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 port_flags;
int rc;
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc)
goto fail_vadaptor_alloc;
- rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ rc = efx_ef10_vadaptor_query(efx, efx->vport_id,
&port_flags, NULL, NULL);
if (rc)
goto fail_vadaptor_query;
@@ -281,11 +280,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
- EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ EFX_EF10_NO_VLAN, &efx->vport_id);
if (rc)
goto fail2;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id, net_dev->dev_addr);
if (rc)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
@@ -296,11 +295,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
return 0;
fail4:
- efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ efx_ef10_vport_del_mac(efx, efx->vport_id, nic_data->vport_mac);
eth_zero_addr(nic_data->vport_mac);
fail3:
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
fail2:
efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
fail1:
@@ -355,22 +354,22 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
efx_ef10_sriov_free_vf_vswitching(efx);
- efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_free(efx, efx->vport_id);
- if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ if (efx->vport_id == EVB_PORT_ID_ASSIGNED)
return; /* No vswitch was ever created */
if (!is_zero_ether_addr(nic_data->vport_mac)) {
- efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx_ef10_vport_del_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
eth_zero_addr(nic_data->vport_mac);
}
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* Only free the vswitch if no VFs are assigned */
if (!pci_vfs_assigned(efx->pci_dev))
- efx_ef10_vswitch_free(efx, nic_data->vport_id);
+ efx_ef10_vswitch_free(efx, efx->vport_id);
}
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 15c731d04065..a8cc3881edce 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1425,23 +1425,16 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
le16_to_cpu(ver_words[2]),
le16_to_cpu(ver_words[3]));
- /* EF10 may have multiple datapath firmware variants within a
- * single version. Report which variants are running.
- */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
- nic_data->rx_dpcpu_fw_id,
- nic_data->tx_dpcpu_fw_id);
+ if (efx->type->print_additional_fwver)
+ offset += efx->type->print_additional_fwver(efx, buf + offset,
+ len - offset);
- /* It's theoretically possible for the string to exceed 31
- * characters, though in practice the first three version
- * components are short enough that this doesn't happen.
- */
- if (WARN_ON(offset >= len))
- buf[0] = 0;
- }
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
return;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 54a45010b576..b107e4c00285 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -326,6 +326,18 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+#define MCDI_CAPABILITY(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN
+
+#define MCDI_CAPABILITY_OFST(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
+
+/* field is FLAGS1 or FLAGS2 */
+#define efx_has_cap(efx, flag, field) \
+ efx->type->check_caps(efx, \
+ MCDI_CAPABILITY(flag), \
+ MCDI_CAPABILITY_OFST(field))
+
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
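To make the token pasting concrete, a use such as the efx_has_cap(efx, VXLAN_NVGRE, FLAGS1) call appearing later in this patch expands to:

	efx->type->check_caps(efx,
			      MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN,
			      MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST);

On EF10 the check_caps implementation tests the corresponding bit of the cached datapath capability flags; other NIC types can supply their own implementation.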
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4310ae5bd898..455a62814fb9 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -186,7 +186,6 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
struct efx_rss_context *ctx,
bool replacing)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 flags = spec->flags;
memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
@@ -211,7 +210,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -332,7 +331,6 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
struct efx_rss_context *ctx = NULL;
@@ -461,7 +459,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
ctx, replacing);
- if (rc == -EINVAL && nic_data->must_realloc_vis)
+ if (rc == -EINVAL && efx->must_realloc_vis)
/* The MC rebooted under us, causing it to reject our filter
* insertion as pointing to an invalid VI (spec->dmaq_id).
*/
@@ -813,7 +811,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
enum efx_encap_type encap_type,
bool multicast, bool rollback)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
@@ -830,8 +828,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
efx_filter_set_uc_def(&spec);
if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
efx_filter_set_encap_type(&spec, encap_type);
else
/*
@@ -899,7 +896,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
*id = efx_mcdi_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
+ if (!table->mc_chaining && !encap_type) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
@@ -965,7 +962,6 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
struct efx_mcdi_filter_vlan *vlan)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
/*
* Do not install unspecified VID if VLAN filtering is enabled.
@@ -1012,11 +1008,10 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
+ if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
efx_mcdi_filter_remove_old(efx);
if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
+ if (table->mc_chaining) {
/*
* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
@@ -1051,7 +1046,7 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
*/
if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
+ if (table->mc_chaining)
efx_mcdi_filter_remove_old(efx);
if (efx_mcdi_filter_insert_def(efx, vlan,
EFX_ENCAP_TYPE_NONE,
@@ -1288,12 +1283,10 @@ efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
return 0;
}
-int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
struct efx_mcdi_filter_table *table;
- struct efx_mcdi_filter_vlan *vlan;
int rc;
if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
@@ -1306,12 +1299,12 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
if (!table)
return -ENOMEM;
+ table->mc_chaining = multicast_chaining;
table->rx_match_count = 0;
rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
if (rc)
goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
if (rc)
goto fail;
@@ -1342,22 +1335,22 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
efx->filter_state = table;
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
return 0;
-
-fail_add_vlan:
- efx_mcdi_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
fail:
kfree(table);
return rc;
}
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (table) {
+ table->must_restore_filters = true;
+ table->must_restore_rss_contexts = true;
+ }
+}
+
/*
* Caller must hold efx->filter_sem for read if race against
* efx_mcdi_filter_table_remove() is possible
@@ -1365,7 +1358,6 @@ fail:
void efx_mcdi_filter_table_restore(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
struct efx_filter_spec *spec;
@@ -1377,10 +1369,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
+ if (!table || !table->must_restore_filters)
return;
down_write(&table->lock);
@@ -1456,7 +1445,7 @@ not_restored:
netif_err(efx, hw, efx->net_dev,
"unable to restore %u filters\n", failed);
else
- nic_data->must_restore_filters = false;
+ table->must_restore_filters = false;
}
void efx_mcdi_filter_table_remove(struct efx_nic *efx)
@@ -1921,7 +1910,6 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
u32 alloc_type = exclusive ?
@@ -1939,12 +1927,11 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
return 0;
}
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
return -EOPNOTSUPP;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
@@ -1961,8 +1948,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
if (context_size)
*context_size = rss_spread;
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1))
efx_mcdi_set_rss_context_flags(efx, ctx);
return 0;
@@ -2030,14 +2016,14 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
context_size);
if (rc != 0)
return rc;
- nic_data->rx_rss_context_exclusive = false;
+ table->rx_rss_context_exclusive = false;
efx_set_default_rx_indir_table(efx, &efx->rss_context);
return 0;
}
@@ -2046,12 +2032,12 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
+ !table->rx_rss_context_exclusive) {
rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
NULL);
if (rc == -EOPNOTSUPP)
@@ -2068,7 +2054,7 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
if (efx->rss_context.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
+ table->rx_rss_context_exclusive = true;
if (rx_indir_table != efx->rss_context.rx_indir_table)
memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
@@ -2182,13 +2168,13 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
struct efx_rss_context *ctx;
int rc;
WARN_ON(!mutex_is_locked(&efx->rss_lock));
- if (!nic_data->must_restore_rss_contexts)
+ if (!table->must_restore_rss_contexts)
return;
list_for_each_entry(ctx, &efx->rss_context.list, list) {
@@ -2204,7 +2190,7 @@ void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
"; RSS filters may fail to be applied\n",
ctx->user_id, rc);
}
- nic_data->must_restore_rss_contexts = false;
+ table->must_restore_rss_contexts = false;
}
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 1837f4f5d661..03a8bf74c733 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -55,6 +55,8 @@ struct efx_mcdi_filter_table {
u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
+ /* Our RSS context is exclusive (as opposed to shared) */
+ bool rx_rss_context_exclusive;
struct rw_semaphore lock; /* Protects entries */
struct {
@@ -75,14 +77,27 @@ struct efx_mcdi_filter_table {
/* Whether in multicast promiscuous mode when last changed */
bool mc_promisc_last;
bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ /* RSS contexts have yet to be restored after MC reboot */
+ bool must_restore_rss_contexts;
+ /* filters have yet to be restored after MC reboot */
+ bool must_restore_filters;
+ /* Multicast filter chaining allows less-specific filters to receive
+ * multicast packets that matched more-specific filters. Early EF10
+	 * firmware didn't support this (SF bug 26807); if mc_chaining == false
+	 * then we still subscribe to the dev_mc_list even when mc_promisc is
+	 * set, to prevent another VI from stealing the traffic.
+ */
+ bool mc_chaining;
bool vlan_filter;
struct list_head vlan_list;
};
-int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining);
void efx_mcdi_filter_table_remove(struct efx_nic *efx);
void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx);
+
/*
* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index dcfe78b0fa5a..962d8395d958 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -168,21 +168,18 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc, i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
- nic_data = efx->nic_data;
-
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -276,7 +273,6 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc;
@@ -295,7 +291,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ab5227b13ae6..b807871d8f69 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -722,11 +722,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
- }
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b084e623b5f4..1afb58feb9ab 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -887,8 +887,10 @@ struct efx_async_filter_insertion {
* @rss_context: Main RSS context. Its @list member is the head of the list of
* RSS contexts created by user requests
* @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
* acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
@@ -1044,10 +1046,12 @@ struct efx_nic {
bool rx_scatter;
struct efx_rss_context rss_context;
struct mutex rss_lock;
+ u32 vport_id;
unsigned int_error_count;
unsigned long int_error_expire;
+ bool must_realloc_vis;
bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
@@ -1292,6 +1296,7 @@ struct efx_udp_tunnel {
* @udp_tnl_add_port: Add a UDP tunnel port
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @udp_tnl_del_port: Remove a UDP tunnel port
+ * @print_additional_fwver: Dump NIC-specific additional FW version info
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1352,6 +1357,9 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
+ unsigned int (*check_caps)(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
void (*mcdi_request)(struct efx_nic *efx,
@@ -1462,6 +1470,8 @@ struct efx_nic_type {
int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
+ size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
+ size_t len);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 6670fda8f35a..8f73c5d996eb 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -360,10 +360,6 @@ enum {
* @warm_boot_count: Last seen MC warm boot count
* @vi_base: Absolute index of first VI in this function
* @n_allocated_vis: Number of VIs allocated to this function
- * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
- * @must_restore_rss_contexts: Flag: RSS contexts have yet to be restored after
- * MC reboot
- * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
* @n_piobufs: Number of PIO buffers allocated to this function
* @wc_membase: Base address of write-combining mapping of the memory BAR
* @pio_write_base: Base address for writing PIO buffers
@@ -372,7 +368,6 @@ enum {
* @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
- * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
@@ -385,7 +380,6 @@ enum {
* %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
- * @vport_id: The function's vport ID, only relevant for PFs
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
@@ -404,16 +398,12 @@ struct efx_ef10_nic_data {
u16 warm_boot_count;
unsigned int vi_base;
unsigned int n_allocated_vis;
- bool must_realloc_vis;
- bool must_restore_rss_contexts;
- bool must_restore_filters;
unsigned int n_piobufs;
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
u16 piobuf_size;
bool must_restore_piobufs;
- bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
@@ -423,7 +413,6 @@ struct efx_ef10_nic_data {
u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
- unsigned int vport_id;
bool must_probe_vswitching;
unsigned int pf_index;
u8 port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 59b4f16896a8..04c7283d205e 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -352,12 +352,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
- (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
- ));
+ return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2);
}
/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index baa464161626..891e9fb6abec 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -948,6 +948,13 @@ fail:
#endif /* CONFIG_SFC_MTD */
+static unsigned int siena_check_caps(const struct efx_nic *efx,
+ u8 flag, u32 offset)
+{
+ /* Siena did not support MC_CMD_GET_CAPABILITIES */
+ return 0;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -1086,4 +1093,5 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
+ .check_caps = siena_check_caps,
};
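Because siena_check_caps() unconditionally returns 0, every efx_has_cap() test evaluates false on Siena, so common code takes its no-capability fallback path without needing an explicit NIC-revision check.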
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 67ddf782d98a..f2638446b62e 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1394,7 +1394,7 @@ static int ave_stop(struct net_device *ndev)
return 0;
}
-static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ave_private *priv = netdev_priv(ndev);
u32 proc_idx, done_idx, ndesc, cmdsts;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index a3934ca6a043..234e8b6816ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -32,7 +33,10 @@
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
-#define PRG_ETH0_TXDLY_SHIFT 5
+/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
+ * cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
/* divider for the result of m250_sel */
@@ -44,6 +48,27 @@
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+/* Bypass (= 0: the signal from the GPIO input connects directly to the
+ * internal sampling logic) or enable (= 1) the internal logic for RXEN and
+ * RXD[3:0] timing tuning.
+ */
+#define PRG_ETH0_ADJ_ENABLE BIT(13)
+/* Controls whether the RXEN and RXD[3:0] signals should be aligned with the
+ * input RX rising/falling edge and sent to the Ethernet internals. This sets
+ * the delay and skew automatically (internally).
+ */
+#define PRG_ETH0_ADJ_SETUP BIT(14)
+/* An internal counter based on the "timing-adjustment" clock. The counter is
+ * cleared on both the falling and rising edges of RX_CLK. This selects the
+ * delay (= the counter value) at which to start sampling RXEN and RXD[3:0].
+ */
+#define PRG_ETH0_ADJ_DELAY GENMASK(19, 15)
+/* Adjusts the skew between each bit of RXEN and RXD[3:0]. If a signal has a
+ * large input delay, the bit for that signal (RXEN = bit 0, RXD[3] = bit 1,
+ * ...) can be configured to be 1 to compensate for a delay of about 1ns.
+ */
+#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+
#define MUX_CLK_NUM_PARENTS 2
struct meson8b_dwmac;
@@ -60,6 +85,8 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
+ u32 rx_delay_ns;
+ struct clk *timing_adj_clk;
};
struct meson8b_dwmac_clk_configs {
@@ -240,30 +267,82 @@ static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
return 0;
}
+static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
+ struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+				 clk);
+
+ return 0;
+}
+
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
+ u32 tx_dly_config, rx_dly_config, delay_config;
int ret;
- u8 tx_dly_val = 0;
+
+ tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
+ dwmac->tx_delay_ns >> 1);
+
+ if (dwmac->rx_delay_ns == 2)
+ rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
+ else
+ rx_dly_config = 0;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ delay_config = tx_dly_config | rx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
- * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
- * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
- */
- tx_dly_val = dwmac->tx_delay_ns >> 1;
- /* fall through */
-
- case PHY_INTERFACE_MODE_RGMII_ID:
+ delay_config = tx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_TXID:
+ delay_config = rx_dly_config;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_config = 0;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+	}
+
+ if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (!dwmac->timing_adj_clk) {
+ dev_err(dwmac->dev,
+ "The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+ return -EINVAL;
+ }
+
+ /* The timing adjustment logic is driven by a separate clock */
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->timing_adj_clk);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "Failed to enable the timing-adjustment clock\n");
+ return ret;
+ }
+ }
+
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK |
+ PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP |
+ PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
+ delay_config);
+
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
-
/* Configure the 125MHz RGMII TX clock, the IP block changes
* the output automatically (= without us having to configure
* a register) based on the line-speed (125MHz for Gbit speeds,
@@ -276,34 +355,18 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return ret;
}
- ret = clk_prepare_enable(dwmac->rgmii_tx_clk);
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->rgmii_tx_clk);
if (ret) {
dev_err(dwmac->dev,
"failed to enable the RGMII TX clock\n");
return ret;
}
-
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
- break;
-
- case PHY_INTERFACE_MODE_RMII:
+ } else {
/* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK,
PRG_ETH0_INVERTED_RMII_CLK);
-
- /* TX clock delay cannot be configured in RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- 0);
-
- break;
-
- default:
- dev_err(dwmac->dev, "unsupported phy-mode %s\n",
- phy_modes(dwmac->phy_mode));
- return -EINVAL;
}
/* enable TX_CLK and PHY_REF_CLK generator */
@@ -358,6 +421,25 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
+ /* use 0ns as fallback since this is what most boards actually use */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ns))
+ dwmac->rx_delay_ns = 0;
+
+ if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
+ dev_err(&pdev->dev,
+ "The only allowed RX delays values are: 0ns, 2ns");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
+ "timing-adjustment");
+ if (IS_ERR(dwmac->timing_adj_clk)) {
+ ret = PTR_ERR(dwmac->timing_adj_clk);
+ goto err_remove_config_dt;
+ }
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
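As a worked example of the TX delay encoding (illustrative values, using the driver's device-tree default): tx_delay_ns = 2 gives tx_dly_val = 2 >> 1 = 1, i.e. a delay of 8ns / 4 * 1 = 2ns, and FIELD_PREP() shifts that value into bits [6:5] of PRG_ETH0:

	/* FIELD_PREP(GENMASK(6, 5), 1) == 1 << 5 == 0x20 */
	u32 tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK, 2 >> 1);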
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 494c859b4ade..67ba67ed0cb9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
total_offset += offset;
}
- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
+ total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
total_ctr += total_offset;
ctr_low = do_div(total_ctr, 1000000000);
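The ULL suffix matters here because cfg->ctr[] holds 32-bit values: without it the multiplication is performed in 32-bit arithmetic and wraps for any ctr[1] >= 5, since 5 * 1000000000 exceeds U32_MAX (about 4.29e9). A sketch assuming u32 operands (illustrative, not stmmac code):

	u32 sec = 5;
	u64 wrong = sec * 1000000000;	/* 32-bit multiply wraps to 705032704 before widening */
	u64 right = sec * 1000000000ULL;	/* multiply done in 64 bits: 5000000000 */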
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff22f274aa43..8f08393f25dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3543,15 +3543,6 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
}
-
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
-{
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
- return 0;
-
- return 1;
-}
-
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -4060,7 +4051,7 @@ static int stmmac_set_features(struct net_device *netdev,
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
- * @dev_id: to pass the net device pointer.
+ * @dev_id: to pass the net device pointer (must be valid).
* Description: this is the main driver interrupt service routine.
* It can call:
* o DMA service routine (to manage incoming frame reception and transmission
@@ -4084,11 +4075,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 3ee6ab104cb9..e6e25960da4f 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -237,12 +237,6 @@ static inline void cas_lock_tx(struct cas *cp)
spin_lock_nested(&cp->tx_lock[i], i);
}
-static inline void cas_lock_all(struct cas *cp)
-{
- spin_lock_irq(&cp->lock);
- cas_lock_tx(cp);
-}
-
/* WTZ: QA was finding deadlock problems with the previous
* versions after long test runs with multiple cards per machine.
* See if replacing cas_lock_all with safer versions helps. The
@@ -266,12 +260,6 @@ static inline void cas_unlock_tx(struct cas *cp)
spin_unlock(&cp->tx_lock[i - 1]);
}
-static inline void cas_unlock_all(struct cas *cp)
-{
- cas_unlock_tx(cp);
- spin_unlock_irq(&cp->lock);
-}
-
#define cas_unlock_all_restore(cp, flags) \
do { \
struct cas *xxxcp = (cp); \
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 40a2ce0ca808..e28727297563 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1362,18 +1362,6 @@ static void print_rxfd(struct rxf_desc *rxfd)
 * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
*/
-/*************************************************************************
- * Tx DB *
- *************************************************************************/
-static inline int bdx_tx_db_size(struct txdb *db)
-{
- int taken = db->wptr - db->rptr;
- if (taken < 0)
- taken = db->size + 1 + taken; /* (size + 1) equals memsz */
-
- return db->size - taken;
-}
-
/**
* __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
* @db: tx data base
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 7b0ad777828d..4d4852f00ff7 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -90,9 +90,8 @@ config TI_CPTS
config TI_CPTS_MOD
tristate
depends on TI_CPTS
+ depends on PTP_1588_CLOCK
default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
- select NET_PTP_CLASSIFY
- imply PTP_1588_CLOCK
default m
config TI_K3_AM65_CPSW_NUSS
@@ -100,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
+ depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
@@ -110,6 +110,28 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPTS
+ tristate "TI K3 AM65x CPTS"
+ depends on ARCH_K3 && OF
+ depends on PTP_1588_CLOCK
+ help
+	  Say y here to support the TI K3 AM65x CPTS with 1588 features, such as
+	  a PTP hardware clock for each CPTS device and network packet
+	  timestamping where applicable.
+	  Depending on the integration, CPTS blocks enable compliance with
+ the IEEE 1588-2008 standard for a precision clock synchronization
+ protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
+ and PCIe Subsystem Precision Time Measurement (PTM).
+
+config TI_AM65_CPSW_TAS
+ bool "Enable TAS offload in AM65 CPSW"
+ depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
+ help
+	  Say y here to support Time Aware Shaper (TAS) offload in AM65 CPSW.
+	  AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
+	  defined in IEEE 802.1Q-2018. The EST scheduler runs on CPTS and the
+	  TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
+
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_DAVINCI_MDIO
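A side note on the "depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS" line added to TI_K3_AM65_CPSW_NUSS above: this is the usual Kconfig idiom for an optional dependency. The expression is satisfied when CPTS is disabled, but when CPTS is m it restricts NUSS to m as well, so a built-in NUSS can never reference symbols of a modular CPTS.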
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 53792190e9c2..be95512d80b5 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c3502aa15ea0..8c4690f3ebcb 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -12,6 +12,7 @@
#include "am65-cpsw-nuss.h"
#include "cpsw_ale.h"
+#include "am65-cpts.h"
#define AM65_CPSW_REGDUMP_VER 0x1
@@ -694,6 +695,27 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
hw_stats[i].offset);
}
+static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return ethtool_op_get_ts_info(ndev, info);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = am65_cpts_phc_index(common->cpts);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
@@ -708,9 +730,17 @@ static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int rrobin;
+
+ rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+
+ if (common->est_enabled && rrobin) {
+ netdev_err(ndev,
+ "p0-rx-ptype-rrobin flag conflicts with QOS\n");
+ return -EINVAL;
+ }
- common->pf_p0_rx_ptype_rrobin =
- !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ common->pf_p0_rx_ptype_rrobin = rrobin;
am65_cpsw_nuss_set_p0_ptype(common);
return 0;
@@ -730,7 +760,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_sset_count = am65_cpsw_get_sset_count,
.get_strings = am65_cpsw_get_strings,
.get_ethtool_stats = am65_cpsw_get_ethtool_stats,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2bf56733ba94..4a8229864ae4 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -30,18 +30,21 @@
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"
+#include "am65-cpts.h"
#define AM65_CPSW_SS_BASE 0x0
#define AM65_CPSW_SGMII_BASE 0x100
#define AM65_CPSW_XGMII_BASE 0x2100
#define AM65_CPSW_CPSW_NU_BASE 0x20000
#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_FRAM_BASE 0x12000
#define AM65_CPSW_NU_STATS_BASE 0x1a000
#define AM65_CPSW_NU_ALE_BASE 0x1e000
#define AM65_CPSW_NU_CPTS_BASE 0x1d000
#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+#define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
#define AM65_CPSW_MAX_PORTS 8
@@ -187,9 +190,11 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ am65_cpsw_qos_link_up(ndev, phy->speed);
netif_tx_wake_all_queues(ndev);
} else {
int tmo;
+
/* disable forwarding */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -203,6 +208,7 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_sl_ctl_reset(port->slave.mac_sl);
+ am65_cpsw_qos_link_down(ndev);
netif_tx_stop_all_queues(ndev);
}
@@ -668,6 +674,18 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ ns = ((u64)psdata[1] << 32) | psdata[0];
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
@@ -745,6 +763,9 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb->dev = ndev;
psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* add RX timestamp */
+ if (port->rx_ts_enabled)
+ am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -904,6 +925,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
ndev = skb->dev;
+ am65_cpts_tx_timestamp(common->cpts, skb);
+
ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -995,6 +1018,10 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
/* padding enabled in hw */
pkt_len = skb_headlen(skb);
+ /* SKB TX timestamp */
+ if (port->tx_ts_enabled)
+ am65_cpts_prep_tx_timestamp(common->cpts, skb);
+
q_idx = skb_get_queue_mapping(skb);
dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
@@ -1158,6 +1185,111 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
return 0;
}
+static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->rx_ts_enabled = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->rx_ts_enabled = true;
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+
+ /* cfg TX timestamp */
+ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
+ AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
+
+ ts_vlan_ltype = ETH_P_8021Q;
+
+ ts_ctrl_ltype2 = ETH_P_1588 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
+
+ ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
+ AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
+
+ if (port->tx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+
+ writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
+ writel(ts_vlan_ltype, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
+ writel(ts_ctrl_ltype2, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
+ writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
+
+ /* en/dis RX timestamp */
+ am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = port->tx_ts_enabled ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = port->rx_ts_enabled ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
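+
+/* Illustrative user-space sketch (not part of the driver) for exercising the
+ * two handlers above through the standard hwtstamp ioctls. The interface
+ * name "eth0" and sock_fd (any open AF_INET socket) are assumptions:
+ *
+ *   struct hwtstamp_config cfg = { 0 };
+ *   struct ifreq ifr = { 0 };
+ *
+ *   cfg.tx_type = HWTSTAMP_TX_ON;
+ *   cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *   ifr.ifr_data = (void *)&cfg;
+ *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ *   ioctl(sock_fd, SIOCGHWTSTAMP, &ifr);
+ *
+ * SIOCSHWTSTAMP lands in am65_cpsw_nuss_hwtstamp_set() and SIOCGHWTSTAMP in
+ * am65_cpsw_nuss_hwtstamp_get() via the slave ioctl handler below.
+ */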
+
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
@@ -1166,6 +1298,13 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_set(ndev, req);
+ case SIOCGHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_get(ndev, req);
+ }
+
if (!port->slave.phy)
return -EOPNOTSUPP;
@@ -1244,6 +1383,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
+ .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1531,6 +1671,40 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
return 0;
}
+static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct device_node *node;
+ struct am65_cpts *cpts;
+ void __iomem *reg_base;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return 0;
+
+ node = of_get_child_by_name(dev->of_node, "cpts");
+ if (!node) {
+ dev_err(dev, "%s cpts not found\n", __func__);
+ return -ENOENT;
+ }
+
+ reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
+ cpts = am65_cpts_create(dev, reg_base, node);
+ if (IS_ERR(cpts)) {
+ int ret = PTR_ERR(cpts);
+
+ if (ret == -EOPNOTSUPP) {
+ dev_info(dev, "cpts disabled\n");
+ return 0;
+ }
+
+ dev_err(dev, "cpts create err %d\n", ret);
+ return ret;
+ }
+ common->cpts = cpts;
+
+ return 0;
+}
+
static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
struct device_node *node, *port_np;
@@ -1571,6 +1745,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
+ port->fetch_ram_base =
+ common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
+ (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->disabled = !of_device_is_available(port_np);
if (port->disabled)
@@ -1719,7 +1896,8 @@ static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
- 0, tx_chn->tx_chn_name, tx_chn);
+ IRQF_TRIGGER_HIGH,
+ tx_chn->tx_chn_name, tx_chn);
if (ret) {
dev_err(dev, "failure requesting tx%u irq %u, %d\n",
tx_chn->id, tx_chn->irq, ret);
@@ -1744,7 +1922,7 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
ret = devm_request_irq(dev, common->rx_chns.irq,
am65_cpsw_nuss_rx_irq,
- 0, dev_name(dev), common);
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
if (ret) {
dev_err(dev, "failure requesting rx irq %u, %d\n",
common->rx_chns.irq, ret);
@@ -1862,10 +2040,21 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- /* We do not want to force this, as in some cases may not have child */
- if (ret)
- dev_warn(dev, "populating child nodes err:%d\n", ret);
+ node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!node) {
+ dev_warn(dev, "MDIO node not found\n");
+ } else if (of_device_is_available(node)) {
+ struct platform_device *mdio_pdev;
+
+ mdio_pdev = of_platform_device_create(node, NULL, dev);
+ if (!mdio_pdev) {
+ ret = -ENODEV;
+ goto err_pm_clear;
+ }
+
+ common->mdio_dev = &mdio_pdev->dev;
+ }
+ of_node_put(node);
am65_cpsw_nuss_get_ver(common);
@@ -1899,6 +2088,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
goto err_of_clear;
}
+ ret = am65_cpsw_init_cpts(common);
+ if (ret)
+ goto err_of_clear;
+
/* init ports */
for (i = 0; i < common->port_num; i++)
am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
@@ -1917,7 +2110,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return 0;
err_of_clear:
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
+err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
@@ -1942,7 +2136,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 41ae5b4c7931..9faf4fb1409b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -9,6 +9,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include "am65-cpsw-qos.h"
+
+struct am65_cpts;
#define HOST_PORT_NUM 0
@@ -35,8 +40,12 @@ struct am65_cpsw_port {
u32 port_id;
void __iomem *port_base;
void __iomem *stat_base;
+ void __iomem *fetch_ram_base;
bool disabled;
struct am65_cpsw_slave_data slave;
+ bool tx_ts_enabled;
+ bool rx_ts_enabled;
+ struct am65_cpsw_qos qos;
};
struct am65_cpsw_host {
@@ -72,6 +81,7 @@ struct am65_cpsw_pdata {
struct am65_cpsw_common {
struct device *dev;
+ struct device *mdio_dev;
const struct am65_cpsw_pdata *pdata;
void __iomem *ss_base;
@@ -96,8 +106,9 @@ struct am65_cpsw_common {
u32 nuss_ver;
u32 cpsw_ver;
-
bool pf_p0_rx_ptype_rrobin;
+ struct am65_cpts *cpts;
+ int est_enabled;
};
struct am65_cpsw_ndev_stats {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
new file mode 100644
index 000000000000..32eac04468bb
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet QoS submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This quality of service submodule implements:
+ * Enhanced Scheduled Traffic (EST - IEEE P802.1Qbv/D2.2)
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/time.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
+#include "am65-cpts.h"
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+enum timer_act {
+ TACT_PROG, /* need to program the timer */
+ TACT_NEED_STOP, /* need to stop first */
+ TACT_SKIP_PROG, /* only the buffer can be updated */
+};
+
+static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
+{
+ return port->qos.est_oper || port->qos.est_admin;
+}
+
+static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
+{
+ u32 val;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (enable)
+ val |= AM65_CPSW_CTL_EST_EN;
+ else
+ val &= ~AM65_CPSW_CTL_EST_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->est_enabled = enable;
+}
+
+static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+}
+
+/* target new EST RAM buffer, actual toggle happens after cycle completion */
+static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
+ int buf_num)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ if (buf_num)
+ val |= AM65_CPSW_PN_EST_BUFSEL;
+ else
+ val &= ~AM65_CPSW_PN_EST_BUFSEL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+}
+
+/* am65_cpsw_port_est_is_swapped() - Indicate whether h/w has transitioned
+ * admin -> oper or not
+ *
+ * Return true if already transitioned, i.e. oper equals admin and the buf
+ * numbers match (est_oper->buf matches est_admin->buf).
+ * Return false if still before the transition, i.e. oper does not equal
+ * admin (a previous admin command is waiting to be transitioned to oper
+ * state, and est_oper->buf does not match est_admin->buf).
+ */
+static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
+ int *admin)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
+ *oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ *admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
+
+ return *admin == *oper;
+}
+
+/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
+ * Admin to program the new schedule.
+ *
+ * Logic as follows:
+ * If oper is the same as admin, return the other buffer (!oper) as the admin
+ * buffer. If they differ, the driver lets the current oper continue, as it
+ * is in the process of transitioning from admin -> oper; keep the oper by
+ * selecting the same oper buffer through the EST_BUFSEL bit in the EST CTL
+ * register. On the second iteration they will match and the code returns.
+ * The actual buffer to write commands to is selected later, just before the
+ * schedule is ready to be updated.
+ */
+static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
+{
+ int oper, admin;
+ int roll = 2;
+
+ while (roll--) {
+ if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return !oper;
+
+ /* h/w has not swapped yet, so hold off the transition (in-flight
+ * memory must not be touched) by keeping the same oper buf targeted.
+ */
+ am65_cpsw_port_est_assign_buf_num(ndev, oper);
+
+ dev_info(&ndev->dev,
+ "Prev. EST admin cycle is in transit %d -> %d\n",
+ oper, admin);
+ }
+
+ return admin;
+}
+
+static void am65_cpsw_admin_to_oper(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = port->qos.est_admin;
+ port->qos.est_admin = NULL;
+}
+
+static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ val &= ~AM65_CPSW_PN_EST_ONEBUF;
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+
+ est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
+
+ /* rolled buf num means changed buf while configuring */
+ if (port->qos.est_oper && port->qos.est_admin &&
+ est_new->buf == port->qos.est_oper->buf)
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+static void am65_cpsw_est_set(struct net_device *ndev, int enable)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ int common_enable = 0;
+ int i;
+
+ am65_cpsw_port_est_enable(port, enable);
+
+ for (i = 0; i < common->port_num; i++)
+ common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
+
+ common_enable |= enable;
+ am65_cpsw_est_enable(common, common_enable);
+}
+
+/* This update should be called from any routine before reading the real
+ * state of the admin -> oper transition; in particular, it is meant for any
+ * generic routine that reports the real state to the taprio qdisc.
+ */
+static void am65_cpsw_est_update_state(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int oper, admin;
+
+ if (!port->qos.est_admin)
+ return;
+
+ if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return;
+
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
+ * 10/100Mb mode. So, given the link speed and a time in ns, convert ns to
+ * the number of bytes/nibbles that can be sent on the wire at that speed.
+ */
+static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
+{
+ u64 temp;
+
+ temp = ns * link_speed;
+ if (link_speed < SPEED_1000)
+ temp <<= 1;
+
+ return DIV_ROUND_UP(temp, 8 * 1000);
+}
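+
+/* Worked example for am65_est_cmd_ns_to_cnt() (editor's illustration): at
+ * SPEED_1000 the count is ns / 8 (one byte per 8 ns), so a 12000 ns interval
+ * yields 1500 bytes; at SPEED_100 it is ns / 40 nibbles, so the same
+ * interval yields 300 nibbles (150 bytes).
+ */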
+
+static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
+ int fetch_cnt,
+ int fetch_allow)
+{
+ u32 prio_mask, cmd_fetch_cnt, cmd;
+
+ do {
+ if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
+ fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
+ cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
+ } else {
+ cmd_fetch_cnt = fetch_cnt;
+ /* fetch count can't be less than 16? */
+ if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
+ cmd_fetch_cnt = 16;
+
+ fetch_cnt = 0;
+ }
+
+ prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
+ cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
+
+ writel(cmd, addr);
+ addr += 4;
+ } while (fetch_cnt);
+
+ return addr;
+}
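+
+/* Worked example for am65_cpsw_est_set_sched_cmds() (editor's illustration):
+ * with AM65_CPSW_FETCH_CNT_MAX = 0x3fff (16383), a fetch count of 20000 with
+ * gate mask 0x3 is split into two RAM commands, (16383 << 8) | 0x3 followed
+ * by (3617 << 8) | 0x3.
+ */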
+
+static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio,
+ int link_speed)
+{
+ int i, cmd_cnt, cmd_sum = 0;
+ u32 fetch_cnt;
+
+ for (i = 0; i < taprio->num_entries; i++) {
+ if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
+ dev_err(&ndev->dev, "Only SET command is supported");
+ return -EINVAL;
+ }
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
+ link_speed);
+
+ cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
+ if (!cmd_cnt)
+ cmd_cnt++;
+
+ cmd_sum += cmd_cnt;
+
+ if (!fetch_cnt)
+ break;
+ }
+
+ return cmd_sum;
+}
+
+static int am65_cpsw_est_check_scheds(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int cmd_num;
+
+ cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
+ port->qos.link_speed);
+ if (cmd_num < 0)
+ return cmd_num;
+
+ if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
+ dev_err(&ndev->dev, "No fetch RAM");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
+ void __iomem *ram_addr, *max_ram_addr;
+ struct tc_taprio_sched_entry *entry;
+ int i, ram_size;
+
+ ram_addr = port->fetch_ram_base;
+ ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
+ ram_addr += est_new->buf * ram_size;
+
+ max_ram_addr = ram_size + ram_addr;
+ for (i = 0; i < est_new->taprio.num_entries; i++) {
+ entry = &est_new->taprio.entries[i];
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
+ port->qos.link_speed);
+ fetch_allow = entry->gate_mask;
+ if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
+ dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
+ fetch_allow);
+
+ ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
+ fetch_allow);
+
+ if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
+ dev_info(&ndev->dev,
+ "next scheds after %d have no impact", i + 1);
+ break;
+ }
+
+ all_fetch_allow |= fetch_allow;
+ }
+
+ /* end cmd: enable the non-timed queues in case the cycle time is overrun */
+ if (ram_addr < max_ram_addr)
+ writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
+}
+
+/* Enable the ESTF periodic output, set cycle start time and interval. */
+static int am65_cpsw_timer_set(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpts *cpts = common->cpts;
+ struct am65_cpts_estf_cfg cfg;
+
+ cfg.ns_period = est_new->taprio.cycle_time;
+ cfg.ns_start = est_new->taprio.base_time;
+
+ return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
+}
+
+static void am65_cpsw_timer_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+
+ am65_cpts_estf_disable(cpts, port->port_id - 1);
+}
+
+static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+ u64 cur_time;
+ s64 diff;
+
+ if (!port->qos.est_oper)
+ return TACT_PROG;
+
+ taprio_new = &est_new->taprio;
+ taprio_oper = &port->qos.est_oper->taprio;
+
+ if (taprio_new->cycle_time != taprio_oper->cycle_time)
+ return TACT_NEED_STOP;
+
+ /* to avoid a timer reset, take base_time from the oper taprio */
+ if (!taprio_new->base_time && taprio_oper)
+ taprio_new->base_time = taprio_oper->base_time;
+
+ if (taprio_new->base_time == taprio_oper->base_time)
+ return TACT_SKIP_PROG;
+
+ /* base times are cycle synchronized */
+ diff = taprio_new->base_time - taprio_oper->base_time;
+ diff = diff < 0 ? -diff : diff;
+ if (diff % taprio_new->cycle_time)
+ return TACT_NEED_STOP;
+
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
+ return TACT_SKIP_PROG;
+
+ /* TODO: Admin schedule at future time is not currently supported */
+ return TACT_NEED_STOP;
+}
+
+static void am65_cpsw_stop_est(struct net_device *ndev)
+{
+ am65_cpsw_est_set(ndev, 0);
+ am65_cpsw_timer_stop(ndev);
+}
+
+static void am65_cpsw_purge_est(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ am65_cpsw_stop_est(ndev);
+
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = NULL;
+ port->qos.est_admin = NULL;
+}
+
+static int am65_cpsw_configure_taprio(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpts *cpts = common->cpts;
+ int ret = 0, tact = TACT_PROG;
+
+ am65_cpsw_est_update_state(ndev);
+
+ if (!est_new->taprio.enable) {
+ am65_cpsw_stop_est(ndev);
+ return ret;
+ }
+
+ ret = am65_cpsw_est_check_scheds(ndev, est_new);
+ if (ret < 0)
+ return ret;
+
+ tact = am65_cpsw_timer_act(ndev, est_new);
+ if (tact == TACT_NEED_STOP) {
+ dev_err(&ndev->dev,
+ "Can't toggle estf timer, stop taprio first");
+ return -EINVAL;
+ }
+
+ if (tact == TACT_PROG)
+ am65_cpsw_timer_stop(ndev);
+
+ if (!est_new->taprio.base_time)
+ est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
+
+ am65_cpsw_port_est_get_buf_num(ndev, est_new);
+ am65_cpsw_est_set_sched_list(ndev, est_new);
+ am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+
+ am65_cpsw_est_set(ndev, est_new->taprio.enable);
+
+ if (tact == TACT_PROG) {
+ ret = am65_cpsw_timer_set(ndev, est_new);
+ if (ret) {
+ dev_err(&ndev->dev, "Failed to set cycle time");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct am65_cpsw_est *est_new;
+ size_t size;
+ int ret = 0;
+
+ if (taprio->cycle_time_extension) {
+ dev_err(&ndev->dev, "Failed to set cycle time extension");
+ return -EOPNOTSUPP;
+ }
+
+ size = sizeof(struct tc_taprio_sched_entry) * taprio->num_entries +
+ sizeof(struct am65_cpsw_est);
+
+ est_new = devm_kzalloc(&ndev->dev, size, GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+ ret = am65_cpsw_configure_taprio(ndev, est_new);
+ if (!ret) {
+ if (taprio->enable) {
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ port->qos.est_admin = est_new;
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ am65_cpsw_purge_est(ndev);
+ }
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ ktime_t cur_time;
+ s64 delta;
+
+ port->qos.link_speed = link_speed;
+ if (!am65_cpsw_port_est_enabled(port))
+ return;
+
+ if (port->qos.link_down_time) {
+ cur_time = ktime_get();
+ delta = ktime_us_delta(cur_time, port->qos.link_down_time);
+ if (delta > USEC_PER_SEC) {
+ dev_err(&ndev->dev,
+ "Link has been lost too long, stopping TAS");
+ goto purge_est;
+ }
+ }
+
+ return;
+
+purge_est:
+ am65_cpsw_purge_est(ndev);
+}
+
+static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -ENODEV;
+
+ if (!netif_running(ndev)) {
+ dev_err(&ndev->dev, "interface is down, link speed unknown\n");
+ return -ENETDOWN;
+ }
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ dev_err(&ndev->dev,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
+ return -EINVAL;
+ }
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ return am65_cpsw_set_taprio(ndev, type_data);
+}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
new file mode 100644
index 000000000000..e8f1b6b59e93
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef AM65_CPSW_QOS_H_
+#define AM65_CPSW_QOS_H_
+
+#include <linux/netdevice.h>
+#include <net/pkt_sched.h>
+
+struct am65_cpsw_est {
+ int buf;
+ /* has to be the last one, as taprio ends in a flexible array of entries */
+ struct tc_taprio_qopt_offload taprio;
+};
+
+struct am65_cpsw_qos {
+ struct am65_cpsw_est *est_admin;
+ struct am65_cpsw_est *est_oper;
+ ktime_t link_down_time;
+ int link_speed;
+};
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
+void am65_cpsw_qos_link_down(struct net_device *ndev);
+
+#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
new file mode 100644
index 000000000000..c59a289e428c
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 AM65x Common Platform Time Sync
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "am65-cpts.h"
+
+struct am65_genf_regs {
+ u32 comp_lo; /* Comparison Low Value 0:31 */
+ u32 comp_hi; /* Comparison High Value 32:63 */
+ u32 control; /* control */
+ u32 length; /* Length */
+ u32 ppm_low; /* PPM Load Low Value 0:31 */
+ u32 ppm_hi; /* PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Nudge value */
+} __aligned(32) __packed;
+
+#define AM65_CPTS_GENF_MAX_NUM 9
+#define AM65_CPTS_ESTF_MAX_NUM 8
+
+struct am65_cpts_regs {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 rftclk_sel; /* Reference Clock Select Register */
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */
+ u32 ts_comp_length; /* Time Stamp Comparison Length */
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_0; /* Event Time Stamp lo 0:31 */
+ u32 event_1; /* Event Type Fields */
+ u32 event_2; /* Event Type Fields domain */
+ u32 event_3; /* Event Time Stamp hi 32:63 */
+ u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */
+ u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */
+ u32 ts_add_val; /* Time Stamp Add value */
+ u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */
+ u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Time Stamp Nudge value */
+ u32 reserv[33];
+ struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
+};
+
+/* CONTROL_REG */
+#define AM65_CPTS_CONTROL_EN BIT(0)
+#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
+#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
+#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
+#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
+#define AM65_CPTS_CONTROL_64MODE BIT(5)
+#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
+#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
+#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
+#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
+#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
+#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
+#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
+#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
+#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
+
+/* RFTCLK_SEL_REG */
+#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
+
+/* TS_PUSH_REG */
+#define AM65_CPTS_TS_PUSH BIT(0)
+
+/* TS_LOAD_EN_REG */
+#define AM65_CPTS_TS_LOAD_EN BIT(0)
+
+/* INTSTAT_RAW_REG */
+#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
+
+/* INTSTAT_MASKED_REG */
+#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
+
+/* INT_ENABLE_REG */
+#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
+
+/* TS_COMP_NUDGE_REG */
+#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
+
+/* EVENT_POP_REG */
+#define AM65_CPTS_EVENT_POP BIT(0)
+
+/* EVENT_1_REG */
+#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
+
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
+
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
+
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
+
+/* EVENT_2_REG */
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
+
+enum {
+ AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */
+ AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ AM65_CPTS_EV_RX, /* Ethernet Receive Event */
+ AM65_CPTS_EV_TX, /* Ethernet Transmit Event */
+ AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */
+ AM65_CPTS_EV_HOST, /* Host Transmit Event */
+};
+
+struct am65_cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 event1;
+ u32 event2;
+ u64 timestamp;
+};
+
+#define AM65_CPTS_FIFO_DEPTH (16)
+#define AM65_CPTS_MAX_EVENTS (32)
+#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
+#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define AM65_CPTS_MIN_PPM 0x400
+
+struct am65_cpts {
+ struct device *dev;
+ struct am65_cpts_regs __iomem *reg;
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ int phc_index;
+ struct clk_hw *clk_mux_hw;
+ struct device_node *clk_mux_np;
+ struct clk *refclk;
+ u32 refclk_freq;
+ struct list_head events;
+ struct list_head pool;
+ struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
+ spinlock_t lock; /* protects events lists */
+ u32 ext_ts_inputs;
+ u32 genf_num;
+ u32 ts_add_val;
+ int irq;
+ struct mutex ptp_clk_lock; /* PHC access sync */
+ u64 timestamp;
+ u32 genf_enable;
+ u32 hw_ts_enable;
+ struct sk_buff_head txq;
+};
+
+struct am65_cpts_skb_cb_data {
+ unsigned long tmo;
+ u32 skb_mtype_seqid;
+};
+
+#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
+#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
+
+static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
+{
+ u32 val;
+
+ val = upper_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_hi);
+ val = lower_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_lo);
+
+ am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
+}
+
+static void am65_cpts_set_add_val(struct am65_cpts *cpts)
+{
+ /* select coefficient according to the rate */
+ cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
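+ /* e.g. a (hypothetical) 500 MHz refclk counts in 2 ns steps, so
+ * ts_add_val = 2 - 1 = 1; a 250 MHz refclk gives ts_add_val = 3
+ */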
+
+ am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
+}
+
+static void am65_cpts_disable(struct am65_cpts *cpts)
+{
+ am65_cpts_write32(cpts, 0, control);
+ am65_cpts_write32(cpts, 0, int_enable);
+}
+
+static int am65_cpts_event_get_port(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
+ AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
+}
+
+static int am65_cpts_event_get_type(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
+}
+
+static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
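+/* Pop one event from the CPTS FIFO into @event. Note the inverted return:
+ * false means an event was popped, true means the FIFO is empty (which lets
+ * the caller in am65_cpts_fifo_read() simply break on it).
+ */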
+static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ u32 r = am65_cpts_read32(cpts, intstat_raw);
+
+ if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
+ event->timestamp = am65_cpts_read32(cpts, event_0);
+ event->event1 = am65_cpts_read32(cpts, event_1);
+ event->event2 = am65_cpts_read32(cpts, event_2);
+ event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
+ am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
+ return false;
+ }
+ return true;
+}
+
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ struct ptp_clock_event pevent;
+ struct am65_cpts_event *event;
+ bool schedule = false;
+ int i, type, ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
+ event = list_first_entry_or_null(&cpts->pool,
+ struct am65_cpts_event, list);
+
+ if (!event) {
+ if (am65_cpts_cpts_purge_events(cpts)) {
+ dev_err(cpts->dev, "cpts: event pool empty\n");
+ ret = -1;
+ goto out;
+ }
+ continue;
+ }
+
+ if (am65_cpts_fifo_pop_event(cpts, event))
+ break;
+
+ type = am65_cpts_event_get_type(event);
+ switch (type) {
+ case AM65_CPTS_EV_PUSH:
+ cpts->timestamp = event->timestamp;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
+ cpts->timestamp);
+ break;
+ case AM65_CPTS_EV_RX:
+ case AM65_CPTS_EV_TX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ schedule = true;
+ break;
+ case AM65_CPTS_EV_HW:
+ pevent.index = am65_cpts_event_get_port(event) - 1;
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ pevent.index, event->timestamp);
+
+ ptp_clock_event(cpts->ptp_clock, &pevent);
+ break;
+ case AM65_CPTS_EV_HOST:
+ break;
+ case AM65_CPTS_EV_ROLL:
+ case AM65_CPTS_EV_HALF:
+ case AM65_CPTS_EV_TS_COMP:
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
+ type,
+ event->event1, event->event2,
+ event->timestamp);
+ break;
+ default:
+ dev_err(cpts->dev, "cpts: unknown event type\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (schedule)
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+
+ return ret;
+}
+
+static u64 am65_cpts_gettime(struct am65_cpts *cpts,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+ u64 val = 0;
+
+ /* temporarily disable the cpts interrupt to avoid an unintended
+ * double read. An interrupt can already be in flight - that's OK.
+ */
+ am65_cpts_write32(cpts, 0, int_enable);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
+ am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
+ am65_cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ am65_cpts_fifo_read(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ val = cpts->timestamp;
+
+ return val;
+}
+
+static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
+{
+ struct am65_cpts *cpts = dev_id;
+
+ if (am65_cpts_fifo_read(cpts))
+ dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
+
+ return IRQ_HANDLED;
+}
+
+/* PTP clock operations */
+static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ int neg_adj = 0;
+ u64 adj_period;
+ u32 val;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* base freq = 1GHz = 1 000 000 000
+ * ppb_norm = ppb * base_freq / clock_freq;
+ * ppm_norm = ppb_norm / 1000
+ * adj_period = 1 000 000 / ppm_norm
+ * adj_period = 1 000 000 000 / ppb_norm
+ * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
+ * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
+ * adj_period = clock_freq / ppb
+ */
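+ /* e.g. (illustrative numbers): refclk_freq = 500 MHz and ppb = 100
+ * give adj_period = 5000000 reference clock cycles per correction
+ */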
+ adj_period = div_u64(cpts->refclk_freq, ppb);
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ val = am65_cpts_read32(cpts, control);
+ if (neg_adj)
+ val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ else
+ val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+ am65_cpts_write32(cpts, val, control);
+
+ val = upper_32_bits(adj_period) & 0x3FF;
+ am65_cpts_write32(cpts, val, ts_ppm_hi);
+ val = lower_32_bits(adj_period);
+ am65_cpts_write32(cpts, val, ts_ppm_low);
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ s64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ ns += delta;
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, sts);
+ mutex_unlock(&cpts->ptp_clk_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ u64 ns;
+
+ /* reuse ptp_clk_lock as it serializes ts push */
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
+
+static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ v = am65_cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ am65_cpts_write32(cpts, v, control);
+}
+
+static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
+{
+ if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_extts_enable_hw(cpts, index, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
+ __func__, index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ u64 cycles;
+ u32 val;
+
+ cycles = cfg->ns_period * cpts->refclk_freq;
+ cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
+ if (cycles > U32_MAX)
+ return -EINVAL;
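+
+ /* e.g. (illustrative): a 200 MHz refclk and a 1 ms ns_period give
+ * cycles = 1000000 * 200000000 / NSEC_PER_SEC = 200000
+ */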
+
+ /* according to TRM should be zeroed */
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ val = upper_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_hi);
+ val = lower_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
+
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
+
+static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ u64 ns_period, ns_start, cycles;
+ struct timespec64 ts;
+ u32 val;
+
+ if (on) {
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
+
+ ts.tv_sec = req->start.sec;
+ ts.tv_nsec = req->start.nsec;
+ ns_start = timespec64_to_ns(&ts);
+
+ val = upper_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
+ val = lower_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, genf[req->index].length);
+
+ cpts->genf_enable |= BIT(req->index);
+ } else {
+ am65_cpts_write32(cpts, 0, genf[req->index].length);
+
+ cpts->genf_enable &= ~BIT(req->index);
+ }
+}
+
+static int am65_cpts_perout_enable(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_perout_enable_hw(cpts, req, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
+ __func__, req->index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return am65_cpts_extts_enable(cpts, rq->extts.index, on);
+ case PTP_CLK_REQ_PEROUT:
+ return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
+
+static struct ptp_clock_info am65_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "CPTS timer",
+ .adjfreq = am65_cpts_ptp_adjfreq,
+ .adjtime = am65_cpts_ptp_adjtime,
+ .gettimex64 = am65_cpts_ptp_gettimex,
+ .settime64 = am65_cpts_ptp_settime,
+ .enable = am65_cpts_ptp_enable,
+ .do_aux_work = am65_cpts_ts_work,
+};
+
+static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ /* no need to grab txq.lock as access is always done under cpts->lock */
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev,
+ "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout any expired skbs over 100 ms */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void am65_cpts_find_ts(struct am65_cpts *cpts)
+{
+ struct am65_cpts_event *event;
+ struct list_head *this, *next;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (am65_cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ unsigned long flags;
+ long delay = -1;
+
+ am65_cpts_find_ts(cpts);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return delay;
+}
+
+/**
+ * am65_cpts_rx_enable - enable rx timestamping
+ * @cpts: cpts handle
+ * @en: enable/disable
+ *
+ * This function enables rx packet timestamping. The CPTS can timestamp all
+ * rx packets.
+ */
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+ u32 val;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ val = am65_cpts_read32(cpts, control);
+ if (en)
+ val |= AM65_CPTS_CONTROL_TSTAMP_EN;
+ else
+ val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
+ am65_cpts_write32(cpts, val, control);
+ mutex_unlock(&cpts->ptp_clk_lock);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
+
+static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ __be16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return 0;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ msgtype = data + offset + OFF_PTP_CONTROL;
+ else
+ msgtype = data + offset;
+
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
+ *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ return 1;
+}
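+
+/* Example for am65_skb_get_mtype_seqid() (editor's illustration): for an
+ * untagged PTPv2 event message over L2 (PTP_CLASS_L2), offset = ETH_HLEN
+ * (14), the message type is the low nibble of data[14] and the big-endian
+ * sequence id sits at data[14 + OFF_PTP_SEQUENCE_ID].
+ */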
+
+/**
+ * am65_cpts_tx_timestamp - save tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function saves the tx packet for timestamping if the packet can be
+ * timestamped. Further processing is done by the PTP auxiliary worker.
+ */
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+
+ /* add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
+
+/**
+ * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function should be called from .xmit().
+ * It checks whether the packet can be timestamped, fills the internal cpts
+ * data in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
+ */
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+ int ret;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return;
+
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
+
+int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return cpts->phc_index;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
+
+static void cpts_free_clk_mux(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ of_clk_del_provider(cpts->clk_mux_np);
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+ of_node_put(cpts->clk_mux_np);
+}
+
+static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
+ struct device_node *node)
+{
+ unsigned int num_parents;
+ const char **parent_names;
+ char *clk_mux_name;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
+ if (!cpts->clk_mux_np)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
+ cpts->clk_mux_np);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(cpts->dev), cpts->clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ reg = &cpts->reg->rftclk_sel;
+ /* dev must be NULL to avoid recursive incrementing
+ * of module refcnt
+ */
+ cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
+ parent_names, num_parents,
+ 0, reg, 0, 5, 0, NULL);
+ if (IS_ERR(cpts->clk_mux_hw)) {
+ ret = PTR_ERR(cpts->clk_mux_hw);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
+ cpts->clk_mux_hw);
+ if (ret)
+ goto clk_hw_register;
+
+ ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
+ if (ret)
+ dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
+
+ return ret;
+
+clk_hw_register:
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+mux_fail:
+ of_node_put(cpts->clk_mux_np);
+ return ret;
+}
+
+static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+{
+ u32 prop[2];
+
+ if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
+ cpts->ext_ts_inputs = prop[0];
+
+ if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
+ cpts->genf_num = prop[0];
+
+ return cpts_of_mux_clk_setup(cpts, node);
+}
+
+static void am65_cpts_release(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ ptp_clock_unregister(cpts->ptp_clock);
+ am65_cpts_disable(cpts);
+ clk_disable_unprepare(cpts->refclk);
+}
+
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct am65_cpts *cpts;
+ int ret, i;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct am65_cpts_regs __iomem *)regs;
+
+ cpts->irq = of_irq_get_byname(node, "cpts");
+ if (cpts->irq <= 0) {
+ ret = cpts->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = am65_cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_init(&cpts->ptp_clk_lock);
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ spin_lock_init(&cpts->lock);
+ skb_queue_head_init(&cpts->txq);
+
+ for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ ret = PTR_ERR(cpts->refclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(cpts->refclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ cpts->refclk_freq = clk_get_rate(cpts->refclk);
+
+ am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
+ cpts->ptp_info = am65_ptp_info;
+
+ if (cpts->ext_ts_inputs)
+ cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
+ if (cpts->genf_num)
+ cpts->ptp_info.n_per_out = cpts->genf_num;
+
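+ /* program the counter increment that matches the refclk rate */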
+ am65_cpts_set_add_val(cpts);
+
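+ /* enable the counter in 64-bit event mode and unmask the
+ * timestamp-pending interrupt
+ */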
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ control);
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ /* set time to the current system time */
+ am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
+
+ cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
+ if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
+ dev_err(dev, "Failed to register ptp clk %ld\n",
+ PTR_ERR(cpts->ptp_clock));
+ ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
+ goto refclk_disable;
+ }
+ cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+
+ ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
+ if (ret) {
+ dev_err(dev, "failed to add ptpclk reset action %d", ret);
+ return ERR_PTR(ret);
+ }
+
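+ /* no hard IRQ handler; events are processed in the threaded handler */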
+ ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ am65_cpts_interrupt,
+ IRQF_ONESHOT, dev_name(dev), cpts);
+ if (ret < 0) {
+ dev_err(cpts->dev, "error attaching irq %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ am65_cpts_read32(cpts, idver),
+ cpts->refclk_freq, cpts->ts_add_val);
+
+ return cpts;
+
+refclk_disable:
+ clk_disable_unprepare(cpts->refclk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_create);
+
+static int am65_cpts_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct am65_cpts *cpts;
+ struct resource *res;
+ void __iomem *base;
+
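+ /* map the "cpts" register region and create the CPTS instance */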
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cpts = am65_cpts_create(dev, base, node);
+ return PTR_ERR_OR_ZERO(cpts);
+}
+
+static const struct of_device_id am65_cpts_of_match[] = {
+ { .compatible = "ti,am65-cpts", },
+ { .compatible = "ti,j721e-cpts", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
+
+static struct platform_driver am65_cpts_driver = {
+ .probe = am65_cpts_probe,
+ .driver = {
+ .name = "am65-cpts",
+ .of_match_table = am65_cpts_of_match,
+ },
+};
+module_platform_driver(am65_cpts_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
new file mode 100644
index 000000000000..98c1960b20b9
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* TI K3 AM65 CPTS driver interface
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPTS_H_
+#define K3_CPTS_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct am65_cpts;
+
+struct am65_cpts_estf_cfg {
+ u64 ns_period;
+ u64 ns_start;
+};
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg);
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+#else
+static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ void __iomem *regs,
+ struct device_node *node)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return -1;
+}
+
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+}
+
+static inline u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ return 0;
+}
+
+static inline int am65_cpts_estf_enable(struct am65_cpts *cpts,
+ int idx, struct am65_cpts_estf_cfg *cfg)
+{
+ return 0;
+}
+
+static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+}
+#endif
+
+#endif /* K3_CPTS_H_ */
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index a530afe3ce12..c20715107075 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -532,7 +532,7 @@ fatal_error:
}
-static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int queue;
unsigned int len;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 33c8dd686206..dce49311d3d3 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1228,7 +1228,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->active_slave = 0;
data->channels = CPSW_MAX_QUEUES;
data->ale_entries = CPSW_ALE_NUM_ENTRIES;
- data->dual_emac = 1;
+ data->dual_emac = true;
data->bd_ram_size = CPSW_BD_RAM_SIZE;
data->mac_control = 0;
@@ -1921,7 +1921,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
cpsw->rx_packet_max = rx_packet_max;
cpsw->descs_pool_size = descs_pool_size;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index b50c3ec3495b..6bcda20ed7e7 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev)
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
}
- linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_andnot(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
lp->link = 0;
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index a962097b58c6..6cff5f7d57c4 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
+ depends on PCI || ARCH_VT8500 || COMPILE_TEST
depends on HAS_DMA
select CRC32
select MII