Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile           4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h           238
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_dcbnl.c       7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c   726
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c    254
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c        250
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c      631
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c        87
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.h        33
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_rdma.c       57
10 files changed, 1361 insertions(+), 926 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 3fc91d12413f..a6e8d9fc8832 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,4 +1,6 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# Copyright (c) 2019-2020 Marvell International Ltd.
+
obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 234c6f30effb..f90dcfe9ee68 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
@@ -55,15 +30,6 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 37
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 20
-#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
- __stringify(QEDE_MINOR_VERSION) "." \
- __stringify(QEDE_REVISION_VERSION) "." \
- __stringify(QEDE_ENGINEERING_VERSION)
-
#define DRV_MODULE_SYM qede
struct qede_stats_common {
@@ -193,44 +159,55 @@ struct qede_dump_info {
u32 args[QEDE_DUMP_MAX_ARGS];
};
+struct qede_coalesce {
+ bool isvalid;
+ u16 rxc;
+ u16 txc;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
struct pci_dev *pdev;
+ struct devlink *devlink;
u32 dp_module;
u8 dp_level;
- unsigned long flags;
-#define IS_VF(edev) (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags))
+ unsigned long flags;
+#define IS_VF(edev) test_bit(QEDE_FLAGS_IS_VF, \
+ &(edev)->flags)
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
u64 ptp_skip_txts;
- struct qed_dev_eth_info dev_info;
-#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
-#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+ struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
+ struct qede_coalesce *coal_entry;
u8 req_num_tx;
u8 fp_num_tx;
u8 req_num_rx;
u8 fp_num_rx;
u16 req_queues;
u16 num_queues;
-#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
-#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
+ u16 total_xdp_queues;
+
+#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i) (i)
-#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
+#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
- /* Smaller private varaiant of the RTNL lock */
+ /* Smaller private variant of the RTNL lock */
struct mutex qede_lock;
u32 state; /* Protected by qede_lock */
u16 rx_buf_size;
@@ -251,22 +228,28 @@ struct qede_dev {
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct qede_stats stats;
-#define QEDE_RSS_INDIR_INITED BIT(0)
-#define QEDE_RSS_KEY_INITED BIT(1)
-#define QEDE_RSS_CAPS_INITED BIT(2)
- u32 rss_params_inited; /* bit-field to track initialized rss params */
- u16 rss_ind_table[128];
- u32 rss_key[10];
- u8 rss_caps;
-
- u16 q_num_rx_buffers; /* Must be a power of two */
- u16 q_num_tx_buffers; /* Must be a power of two */
-
- bool gro_disable;
- struct list_head vlan_list;
- u16 configured_vlans;
- u16 non_configured_vlans;
- bool accept_any_vlan;
+
+ /* Bitfield to track initialized RSS params */
+ u32 rss_params_inited;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+
+ u16 rss_ind_table[128];
+ u32 rss_key[10];
+ u8 rss_caps;
+
+ /* Both must be a power of two */
+ u16 q_num_rx_buffers;
+ u16 q_num_tx_buffers;
+
+ bool gro_disable;
+
+ struct list_head vlan_list;
+ u16 configured_vlans;
+ u16 non_configured_vlans;
+ bool accept_any_vlan;
+
struct delayed_work sp_task;
unsigned long sp_flags;
u16 vxlan_dst_port;
@@ -277,7 +260,16 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
- struct bpf_prog *xdp_prog;
+ struct bpf_prog *xdp_prog;
+
+ enum qed_hw_err_type last_err_type;
+ unsigned long err_flags;
+#define QEDE_ERR_IS_HANDLED 31
+#define QEDE_ERR_ATTN_CLR_EN 0
+#define QEDE_ERR_GET_DBG_INFO 1
+#define QEDE_ERR_IS_RECOVERABLE 2
+#define QEDE_ERR_WARN 3
+
struct qede_dump_info dump_info;
};
@@ -389,29 +381,34 @@ struct sw_tx_bd {
};
struct sw_tx_xdp {
- struct page *page;
- dma_addr_t mapping;
+ struct page *page;
+ struct xdp_frame *xdpf;
+ dma_addr_t mapping;
};
struct qede_tx_queue {
- u8 is_xdp;
- bool is_legacy;
- u16 sw_tx_cons;
- u16 sw_tx_prod;
- u16 num_tx_buffers; /* Slowpath only */
+ u8 is_xdp;
+ bool is_legacy;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ u16 num_tx_buffers; /* Slowpath only */
- u64 xmit_pkts;
- u64 stopped_cnt;
- u64 tx_mem_alloc_err;
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+ u64 tx_mem_alloc_err;
- __le16 *hw_cons_ptr;
+ __le16 *hw_cons_ptr;
/* Needed for the mapping of packets */
- struct device *dev;
+ struct device *dev;
- void __iomem *doorbell_addr;
- union db_prod tx_db;
- int index; /* Slowpath only */
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+
+ /* Spinlock for XDP queues in case of XDP_REDIRECT */
+ spinlock_t xdp_tx_lock;
+
+ int index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
@@ -423,22 +420,22 @@ struct qede_tx_queue {
#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
(&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
[QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
-#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
/* Regular Tx requires skb + metadata for release purpose,
* while XDP requires the pages and the mapped address.
*/
union {
- struct sw_tx_bd *skbs;
- struct sw_tx_xdp *xdp;
- } sw_tx_ring;
+ struct sw_tx_bd *skbs;
+ struct sw_tx_xdp *xdp;
+ } sw_tx_ring;
- struct qed_chain tx_pbl;
+ struct qed_chain tx_pbl;
/* Slowpath; Should be kept in end [unless missing padding] */
- void *handle;
- u16 cos;
- u16 ndev_txq_id;
+ void *handle;
+ u16 cos;
+ u16 ndev_txq_id;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -452,32 +449,37 @@ struct qede_tx_queue {
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
struct qede_fastpath {
- struct qede_dev *edev;
-#define QEDE_FASTPATH_TX BIT(0)
-#define QEDE_FASTPATH_RX BIT(1)
-#define QEDE_FASTPATH_XDP BIT(2)
-#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
- u8 type;
- u8 id;
- u8 xdp_xmit;
- struct napi_struct napi;
- struct qed_sb_info *sb_info;
- struct qede_rx_queue *rxq;
- struct qede_tx_queue *txq;
- struct qede_tx_queue *xdp_tx;
-
-#define VEC_NAME_SIZE (sizeof_field(struct net_device, name) + 8)
- char name[VEC_NAME_SIZE];
+ struct qede_dev *edev;
+
+ u8 type;
+#define QEDE_FASTPATH_TX BIT(0)
+#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_XDP BIT(2)
+#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+
+ u8 id;
+
+ u8 xdp_xmit;
+#define QEDE_XDP_TX BIT(0)
+#define QEDE_XDP_REDIRECT BIT(1)
+
+ struct napi_struct napi;
+ struct qed_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txq;
+ struct qede_tx_queue *xdp_tx;
+
+ char name[IFNAMSIZ + 8];
};
/* Debug print definitions */
-#define DP_NAME(edev) ((edev)->ndev->name)
+#define DP_NAME(edev) netdev_name((edev)->ndev)
-#define XMIT_PLAIN 0
-#define XMIT_L4_CSUM BIT(0)
-#define XMIT_LSO BIT(1)
-#define XMIT_ENC BIT(2)
-#define XMIT_ENC_GSO_L4_CSUM BIT(3)
+#define XMIT_PLAIN 0
+#define XMIT_L4_CSUM BIT(0)
+#define XMIT_LSO BIT(1)
+#define XMIT_ENC BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM BIT(3)
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
@@ -485,11 +487,16 @@ struct qede_fastpath {
#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RSVD1 2
+#define QEDE_SP_RSVD2 3
+#define QEDE_SP_HW_ERR 4
+#define QEDE_SP_ARFS_CONFIG 5
+#define QEDE_SP_AER 7
+#define QEDE_SP_DISABLE 8
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-#define QEDE_SP_ARFS_CONFIG 4
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif
@@ -516,12 +523,13 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+ struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len);
@@ -557,6 +565,7 @@ void qede_set_dcbnl_ops(struct net_device *ndev);
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
@@ -570,16 +579,25 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f);
+void qede_forced_speed_maps_init(void);
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
+
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
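
The new struct qede_coalesce, together with the coal_entry array added to struct qede_dev, lets the driver remember the last coalescing values programmed for each queue. As an illustration only (qede_restore_coalesce() below is hypothetical and not part of this patch), cached entries could be replayed after a queue reload roughly like this, using only the set_coalesce() operation and the fields visible in the hunks above:

/* Hypothetical sketch: replay cached per-queue coalescing after a
 * reload. Only set_coalesce(), coal_entry[] and the fastpath type
 * flags from this patch are assumed; the function itself is made up.
 */
static void qede_restore_coalesce(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->num_queues; i++) {
		struct qede_coalesce *coal = &edev->coal_entry[i];
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (!coal->isvalid)
			continue;

		if (fp->type & QEDE_FASTPATH_RX)
			edev->ops->common->set_coalesce(edev->cdev, coal->rxc,
							0, fp->rxq->handle);

		if (fp->type & QEDE_FASTPATH_TX)
			edev->ops->common->set_coalesce(edev->cdev, 0,
							coal->txc,
							fp->txq->handle);
	}
}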
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index e6e844a8cbc7..2763369bbc61 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*/
+ * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
#include <linux/types.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8a426afb6a55..8034d812d5a0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
@@ -38,6 +13,8 @@
#include <linux/pci.h>
#include <linux/capability.h>
#include <linux/vmalloc.h>
+#include <linux/phylink.h>
+
#include "qede.h"
#include "qede_ptp.h"
@@ -190,12 +167,18 @@ static const struct {
enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
+ QEDE_PRI_FLAG_RECOVER_ON_ERROR,
+ QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
+ QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
+ "Recover on error",
+ "ESL capable",
+ "ESL active",
};
enum qede_ethtool_tests {
@@ -217,6 +200,96 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
"Nvram (online)\t\t",
};
+/* Forced speed capabilities maps */
+
+struct qede_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define QEDE_FORCED_SPEED_MAP(value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = qede_forced_speed_##value, \
+ .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \
+}
+
+static const u32 qede_forced_speed_1000[] __initconst = {
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qede_forced_speed_10000[] __initconst = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qede_forced_speed_20000[] __initconst = {
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+};
+
+static const u32 qede_forced_speed_25000[] __initconst = {
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qede_forced_speed_40000[] __initconst = {
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qede_forced_speed_50000[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qede_forced_speed_100000[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = {
+ QEDE_FORCED_SPEED_MAP(1000),
+ QEDE_FORCED_SPEED_MAP(10000),
+ QEDE_FORCED_SPEED_MAP(20000),
+ QEDE_FORCED_SPEED_MAP(25000),
+ QEDE_FORCED_SPEED_MAP(40000),
+ QEDE_FORCED_SPEED_MAP(50000),
+ QEDE_FORCED_SPEED_MAP(100000),
+};
+
+void __init qede_forced_speed_maps_init(void)
+{
+ struct qede_forced_speed_map *map;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
+ map = qede_forced_speed_maps + i;
+
+ linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
+ map->cap_arr = NULL;
+ map->arr_size = 0;
+ }
+}
+
+/* Ethtool callbacks */
+
static void qede_get_strings_stats_txq(struct qede_dev *edev,
struct qede_tx_queue *txq, u8 **buf)
{
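
The forced-speed tables rely on a two-stage initialization: the per-speed cap arrays are __initconst, qede_forced_speed_maps_init() converts each array into the caps linkmode bitmap at module init, and the arrays are then discarded with the rest of init memory, which is why the map array itself is __ro_after_init. For reference, QEDE_FORCED_SPEED_MAP(25000) expands to the initializer below:

/* Expansion of QEDE_FORCED_SPEED_MAP(25000), shown for clarity only */
{
	.speed    = SPEED_25000,
	.cap_arr  = qede_forced_speed_25000,
	.arr_size = ARRAY_SIZE(qede_forced_speed_25000),
}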
@@ -409,6 +482,7 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
static u32 qede_get_priv_flags(struct net_device *dev)
{
struct qede_dev *edev = netdev_priv(dev);
+ bool esl_active;
u32 flags = 0;
if (edev->dev_info.common.num_hwfns > 1)
@@ -417,79 +491,42 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->dev_info.common.smart_an)
flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+ if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
+ flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+
+ if (edev->dev_info.common.esl)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);
+
+ edev->ops->common->get_esl_status(edev->cdev, &esl_active);
+
+ if (esl_active)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);
+
return flags;
}
-struct qede_link_mode_mapping {
- u32 qed_link_mode;
- u32 ethtool_link_mode;
-};
+static int qede_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 cflags = qede_get_priv_flags(dev);
+ u32 dflags = flags ^ cflags;
-static const struct qede_link_mode_mapping qed_lm_map[] = {
- {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
- {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
- {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
- {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
- {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
- {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
- {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
- {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
- {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
- {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
- {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
- {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
- {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT},
- {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
- {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
- {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
- {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
- {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
- {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
- {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
- {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
- {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
- {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
- {QED_LM_100000baseKR4_Full_BIT,
- ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
- {QED_LM_100000baseSR4_Full_BIT,
- ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
- {QED_LM_100000baseCR4_Full_BIT,
- ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
- {QED_LM_100000baseLR4_ER4_Full_BIT,
- ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
- {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
- {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT},
- {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
- {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
- {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
- {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT},
-};
+ /* can only change RECOVER_ON_ERROR flag */
+ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ return -EINVAL;
-#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \
-{ \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
- if ((caps) & (qed_lm_map[i].qed_link_mode)) \
- __set_bit(qed_lm_map[i].ethtool_link_mode,\
- lk_ksettings->link_modes.name); \
- } \
-}
+ if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+ else
+ clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
-#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \
-{ \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
- if (test_bit(qed_lm_map[i].ethtool_link_mode, \
- lk_ksettings->link_modes.name)) \
- caps |= qed_lm_map[i].qed_link_mode; \
- } \
+ return 0;
}
static int qede_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
+ typeof(cmd->link_modes) *link_modes = &cmd->link_modes;
struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -499,14 +536,9 @@ static int qede_get_link_ksettings(struct net_device *dev,
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)
-
- ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
- QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)
+ linkmode_copy(link_modes->supported, current_link.supported_caps);
+ linkmode_copy(link_modes->advertising, current_link.advertised_caps);
+ linkmode_copy(link_modes->lp_advertising, current_link.lp_caps);
if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
base->speed = current_link.speed;
@@ -530,9 +562,10 @@ static int qede_set_link_ksettings(struct net_device *dev,
{
const struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
+ const struct qede_forced_speed_map *map;
struct qed_link_output current_link;
struct qed_link_params params;
- u32 sup_caps;
+ u32 i;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -544,107 +577,40 @@ static int qede_set_link_ksettings(struct net_device *dev,
params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+
if (base->autoneg == AUTONEG_ENABLE) {
- if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
+ if (!phylink_test(current_link.supported_caps, Autoneg)) {
DP_INFO(edev, "Auto negotiation is not supported\n");
return -EOPNOTSUPP;
}
params.autoneg = true;
params.forced_speed = 0;
- QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
+
+ linkmode_copy(params.adv_speeds, cmd->link_modes.advertising);
} else { /* forced speed */
params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
params.autoneg = false;
params.forced_speed = base->speed;
- switch (base->speed) {
- case SPEED_1000:
- sup_caps = QED_LM_1000baseT_Full_BIT |
- QED_LM_1000baseKX_Full_BIT |
- QED_LM_1000baseX_Full_BIT;
- if (!(current_link.supported_caps & sup_caps)) {
- DP_INFO(edev, "1G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = current_link.supported_caps &
- sup_caps;
- break;
- case SPEED_10000:
- sup_caps = QED_LM_10000baseT_Full_BIT |
- QED_LM_10000baseKR_Full_BIT |
- QED_LM_10000baseKX4_Full_BIT |
- QED_LM_10000baseR_FEC_BIT |
- QED_LM_10000baseCR_Full_BIT |
- QED_LM_10000baseSR_Full_BIT |
- QED_LM_10000baseLR_Full_BIT |
- QED_LM_10000baseLRM_Full_BIT;
- if (!(current_link.supported_caps & sup_caps)) {
- DP_INFO(edev, "10G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = current_link.supported_caps &
- sup_caps;
- break;
- case SPEED_20000:
- if (!(current_link.supported_caps &
- QED_LM_20000baseKR2_Full_BIT)) {
- DP_INFO(edev, "20G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
- break;
- case SPEED_25000:
- sup_caps = QED_LM_25000baseKR_Full_BIT |
- QED_LM_25000baseCR_Full_BIT |
- QED_LM_25000baseSR_Full_BIT;
- if (!(current_link.supported_caps & sup_caps)) {
- DP_INFO(edev, "25G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = current_link.supported_caps &
- sup_caps;
- break;
- case SPEED_40000:
- sup_caps = QED_LM_40000baseLR4_Full_BIT |
- QED_LM_40000baseKR4_Full_BIT |
- QED_LM_40000baseCR4_Full_BIT |
- QED_LM_40000baseSR4_Full_BIT;
- if (!(current_link.supported_caps & sup_caps)) {
- DP_INFO(edev, "40G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = current_link.supported_caps &
- sup_caps;
- break;
- case SPEED_50000:
- sup_caps = QED_LM_50000baseKR2_Full_BIT |
- QED_LM_50000baseCR2_Full_BIT |
- QED_LM_50000baseSR2_Full_BIT;
- if (!(current_link.supported_caps & sup_caps)) {
- DP_INFO(edev, "50G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = current_link.supported_caps &
- sup_caps;
- break;
- case SPEED_100000:
- sup_caps = QED_LM_100000baseKR4_Full_BIT |
- QED_LM_100000baseSR4_Full_BIT |
- QED_LM_100000baseCR4_Full_BIT |
- QED_LM_100000baseLR4_ER4_Full_BIT;
- if (!(current_link.supported_caps & sup_caps)) {
- DP_INFO(edev, "100G speed not supported\n");
- return -EINVAL;
- }
- params.adv_speeds = current_link.supported_caps &
- sup_caps;
- break;
- default:
- DP_INFO(edev, "Unsupported speed %u\n", base->speed);
- return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
+ map = qede_forced_speed_maps + i;
+
+ if (base->speed != map->speed ||
+ !linkmode_intersects(current_link.supported_caps,
+ map->caps))
+ continue;
+
+ linkmode_and(params.adv_speeds,
+ current_link.supported_caps, map->caps);
+ goto set_link;
}
+
+ DP_INFO(edev, "Unsupported speed %u\n", base->speed);
+ return -EINVAL;
}
+set_link:
params.link_up = true;
edev->ops->common->set_link(edev->cdev, &params);
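
Concretely, a forced-speed request such as "ethtool -s eth0 speed 25000 autoneg off" is now resolved by the table scan instead of the removed per-speed switch. A worked example under an assumed capability set:

/* Worked example (assumed capabilities): the port supports
 * 25000baseCR and 25000baseSR but not 25000baseKR. The loop lands
 * on the SPEED_25000 map entry, then:
 *
 *   linkmode_intersects(current_link.supported_caps, map->caps)
 *       -> true (two modes overlap)
 *   linkmode_and(params.adv_speeds,
 *                current_link.supported_caps, map->caps)
 *       -> adv_speeds = { 25000baseCR_Full, 25000baseSR_Full }
 *
 * which is the same subset the old switch computed by hand with
 * QED_LM_* masks.
 */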
@@ -658,7 +624,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
struct qede_dev *edev = netdev_priv(ndev);
char mbi[ETHTOOL_FWVERS_LEN];
- strlcpy(info->driver, "qede", sizeof(info->driver));
+ strscpy(info->driver, "qede", sizeof(info->driver));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -672,13 +638,13 @@ static void qede_get_drvinfo(struct net_device *ndev,
(edev->dev_info.common.mfw_rev >> 8) & 0xFF,
edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) <
+ if ((strlen(storm) + strlen("[storm]")) <
sizeof(info->version))
snprintf(info->version, sizeof(info->version),
- "%s [storm %s]", DRV_MODULE_VERSION, storm);
+ "[storm %s]", storm);
else
snprintf(info->version, sizeof(info->version),
- "%s %s", DRV_MODULE_VERSION, storm);
+ "%s", storm);
if (edev->dev_info.common.mbi_version) {
snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
@@ -695,7 +661,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
"mfw %s", mfw);
}
- strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
@@ -807,7 +773,9 @@ static int qede_flash_device(struct net_device *dev,
}
static int qede_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
void *rx_handle = NULL, *tx_handle = NULL;
struct qede_dev *edev = netdev_priv(dev);
@@ -866,8 +834,9 @@ out:
return rc;
}
-static int qede_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
@@ -902,6 +871,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set RX coalesce error, rc = %d\n", rc);
return rc;
}
+ edev->coal_entry[i].rxc = rxc;
+ edev->coal_entry[i].isvalid = true;
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -921,6 +892,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set TX coalesce error, rc = %d\n", rc);
return rc;
}
+ edev->coal_entry[i].txc = txc;
+ edev->coal_entry[i].isvalid = true;
}
}
@@ -928,7 +901,9 @@ static int qede_set_coalesce(struct net_device *dev,
}
static void qede_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -939,7 +914,9 @@ static void qede_get_ringparam(struct net_device *dev,
}
static int qede_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -1008,13 +985,16 @@ static int qede_set_pauseparam(struct net_device *dev,
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+
if (epause->autoneg) {
- if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
+ if (!phylink_test(current_link.supported_caps, Autoneg)) {
DP_INFO(edev, "autoneg not supported\n");
return -EINVAL;
}
+
params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
}
+
if (epause->rx_pause)
params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
if (epause->tx_pause)
@@ -1070,7 +1050,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
args.u.mtu = new_mtu;
args.func = &qede_update_mtu;
qede_reload(edev, &args, false);
-
+#if IS_ENABLED(CONFIG_QED_RDMA)
+ qede_rdma_event_change_mtu(edev);
+#endif
edev->ops->common->update_mtu(edev->cdev, new_mtu);
return 0;
@@ -1566,7 +1548,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
- u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ u16 sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
@@ -1596,17 +1578,6 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
continue;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- /* Memory barrier to prevent the CPU from doing speculative
- * reads of CQE/BD before reading hw_comp_cons. If the CQE is
- * read before it is written by FW, then FW writes CQE and SB,
- * and then the CPU reads the hw_comp_cons, it will use an old
- * CQE.
- */
- rmb();
-
/* Get the CQE from the completion ring */
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
@@ -1889,6 +1860,78 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
+static u32 qede_link_to_ethtool_fec(u32 link_fec)
+{
+ u32 eth_fec = 0;
+
+ if (link_fec & QED_FEC_MODE_NONE)
+ eth_fec |= ETHTOOL_FEC_OFF;
+ if (link_fec & QED_FEC_MODE_FIRECODE)
+ eth_fec |= ETHTOOL_FEC_BASER;
+ if (link_fec & QED_FEC_MODE_RS)
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (link_fec & QED_FEC_MODE_AUTO)
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (link_fec & QED_FEC_MODE_UNSUPPORTED)
+ eth_fec |= ETHTOOL_FEC_NONE;
+
+ return eth_fec;
+}
+
+static u32 qede_ethtool_to_link_fec(u32 eth_fec)
+{
+ u32 link_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ link_fec |= QED_FEC_MODE_NONE;
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ link_fec |= QED_FEC_MODE_FIRECODE;
+ if (eth_fec & ETHTOOL_FEC_RS)
+ link_fec |= QED_FEC_MODE_RS;
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ link_fec |= QED_FEC_MODE_AUTO;
+ if (eth_fec & ETHTOOL_FEC_NONE)
+ link_fec |= QED_FEC_MODE_UNSUPPORTED;
+
+ return link_fec;
+}
+
+static int qede_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output curr_link;
+
+ memset(&curr_link, 0, sizeof(curr_link));
+ edev->ops->common->get_link(edev->cdev, &curr_link);
+
+ fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec);
+ fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec);
+
+ return 0;
+}
+
+static int qede_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_params params;
+
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG;
+ params.fec = qede_ethtool_to_link_fec(fecparam->fec);
+ params.link_up = true;
+
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
static int qede_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2086,76 +2129,207 @@ err:
return rc;
}
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rxc, txc;
+ int rc = 0;
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ rxc, 0,
+ fp->rxq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set RX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].rxc = rxc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ 0, txc,
+ fp->txq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set TX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].txc = txc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+out:
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_get_per_coalesce(struct net_device *dev,
+ u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ void *rx_handle = NULL, *tx_handle = NULL;
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rx_coal, tx_coal;
+ int rc = 0;
+
+ rx_coal = QED_DEFAULT_RX_USECS;
+ tx_coal = QED_DEFAULT_TX_USECS;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ rx_handle = fp->rxq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
+ rx_handle);
+ if (rc) {
+ DP_INFO(edev, "Read Rx coalesce error\n");
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+ if (fp->type & QEDE_FASTPATH_TX)
+ tx_handle = fp->txq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
+ tx_handle);
+ if (rc)
+ DP_INFO(edev, "Read Tx coalesce error\n");
+
+out:
+ __qede_unlock(edev);
+
+ coal->rx_coalesce_usecs = rx_coal;
+ coal->tx_coalesce_usecs = tx_coal;
+
+ return rc;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
- .get_link_ksettings = qede_get_link_ksettings,
- .set_link_ksettings = qede_set_link_ksettings,
- .get_drvinfo = qede_get_drvinfo,
- .get_regs_len = qede_get_regs_len,
- .get_regs = qede_get_regs,
- .get_wol = qede_get_wol,
- .set_wol = qede_set_wol,
- .get_msglevel = qede_get_msglevel,
- .set_msglevel = qede_set_msglevel,
- .nway_reset = qede_nway_reset,
- .get_link = qede_get_link,
- .get_coalesce = qede_get_coalesce,
- .set_coalesce = qede_set_coalesce,
- .get_ringparam = qede_get_ringparam,
- .set_ringparam = qede_set_ringparam,
- .get_pauseparam = qede_get_pauseparam,
- .set_pauseparam = qede_set_pauseparam,
- .get_strings = qede_get_strings,
- .set_phys_id = qede_set_phys_id,
- .get_ethtool_stats = qede_get_ethtool_stats,
- .get_priv_flags = qede_get_priv_flags,
- .get_sset_count = qede_get_sset_count,
- .get_rxnfc = qede_get_rxnfc,
- .set_rxnfc = qede_set_rxnfc,
- .get_rxfh_indir_size = qede_get_rxfh_indir_size,
- .get_rxfh_key_size = qede_get_rxfh_key_size,
- .get_rxfh = qede_get_rxfh,
- .set_rxfh = qede_set_rxfh,
- .get_ts_info = qede_get_ts_info,
- .get_channels = qede_get_channels,
- .set_channels = qede_set_channels,
- .self_test = qede_self_test,
- .get_module_info = qede_get_module_info,
- .get_module_eeprom = qede_get_module_eeprom,
- .get_eee = qede_get_eee,
- .set_eee = qede_set_eee,
-
- .get_tunable = qede_get_tunable,
- .set_tunable = qede_set_tunable,
- .flash_device = qede_flash_device,
- .get_dump_flag = qede_get_dump_flag,
- .get_dump_data = qede_get_dump_data,
- .set_dump = qede_set_dump,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .set_link_ksettings = qede_set_link_ksettings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_regs_len = qede_get_regs_len,
+ .get_regs = qede_get_regs,
+ .get_wol = qede_get_wol,
+ .set_wol = qede_set_wol,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .nway_reset = qede_nway_reset,
+ .get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_pauseparam = qede_get_pauseparam,
+ .set_pauseparam = qede_set_pauseparam,
+ .get_strings = qede_get_strings,
+ .set_phys_id = qede_set_phys_id,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .set_priv_flags = qede_set_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_ts_info = qede_get_ts_info,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+ .get_module_info = qede_get_module_info,
+ .get_module_eeprom = qede_get_module_eeprom,
+ .get_eee = qede_get_eee,
+ .set_eee = qede_set_eee,
+ .get_fecparam = qede_get_fecparam,
+ .set_fecparam = qede_set_fecparam,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
+ .flash_device = qede_flash_device,
+ .get_dump_flag = qede_get_dump_flag,
+ .get_dump_data = qede_get_dump_data,
+ .set_dump = qede_set_dump,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
- .get_link_ksettings = qede_get_link_ksettings,
- .get_drvinfo = qede_get_drvinfo,
- .get_msglevel = qede_get_msglevel,
- .set_msglevel = qede_set_msglevel,
- .get_link = qede_get_link,
- .get_coalesce = qede_get_coalesce,
- .set_coalesce = qede_set_coalesce,
- .get_ringparam = qede_get_ringparam,
- .set_ringparam = qede_set_ringparam,
- .get_strings = qede_get_strings,
- .get_ethtool_stats = qede_get_ethtool_stats,
- .get_priv_flags = qede_get_priv_flags,
- .get_sset_count = qede_get_sset_count,
- .get_rxnfc = qede_get_rxnfc,
- .set_rxnfc = qede_set_rxnfc,
- .get_rxfh_indir_size = qede_get_rxfh_indir_size,
- .get_rxfh_key_size = qede_get_rxfh_key_size,
- .get_rxfh = qede_get_rxfh,
- .set_rxfh = qede_set_rxfh,
- .get_channels = qede_get_channels,
- .set_channels = qede_set_channels,
- .get_tunable = qede_get_tunable,
- .set_tunable = qede_set_tunable,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
void qede_set_ethtool_ops(struct net_device *dev)
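
With get/set_per_queue_coalesce wired into both ops tables and supported_coalesce_params restricted to ETHTOOL_COALESCE_USECS, the ethtool core rejects any coalesce field other than the usecs pair before the driver is ever called. An assumed usage example, showing what a per-queue request hands to qede_set_per_coalesce():

/* Illustration (hypothetical values): the command
 *
 *   ethtool --per-queue eth0 queue_mask 0x2 \
 *           --coalesce rx-usecs 64 tx-usecs 128
 *
 * targets queue 1 (bit 1 of the mask) and reaches the driver as
 *
 *   qede_set_per_coalesce(dev, 1, coal)
 *   coal->rx_coalesce_usecs == 64
 *   coal->tx_coalesce_usecs == 128
 *
 * Values above QED_COALESCE_MAX fail with -EINVAL; accepted ones
 * are cached in edev->coal_entry[1] for later restoration.
 */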
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index d1ce4531d01a..3010833ddde3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
@@ -336,6 +311,9 @@ int qede_alloc_arfs(struct qede_dev *edev)
{
int i;
+ if (!edev->dev_info.common.b_arfs_capable)
+ return -EINVAL;
+
edev->arfs = vzalloc(sizeof(*edev->arfs));
if (!edev->arfs)
return -ENOMEM;
@@ -579,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
return;
}
- ether_addr_copy(edev->ndev->dev_addr, mac);
+ eth_hw_addr_set(edev->ndev, mac);
__qede_unlock(edev);
}
@@ -639,32 +617,30 @@ void qede_fill_rss_params(struct qede_dev *edev,
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
- unsigned char mac[ETH_ALEN])
+ const unsigned char mac[ETH_ALEN])
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_ucast_params ucast;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.mac_valid = 1;
- ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.mac_valid = 1;
+ ether_addr_copy(ucast.mac, mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}
static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
u16 vid)
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_ucast_params ucast;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.vlan_valid = 1;
- filter_cmd.filter.ucast.vlan = vid;
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.vlan_valid = 1;
+ ucast.vlan = vid;
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
@@ -853,7 +829,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_vlan *vlan = NULL;
+ struct qede_vlan *vlan;
int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
@@ -864,7 +840,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
if (vlan->vid == vid)
break;
- if (!vlan || (vlan->vid != vid)) {
+ if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Vlan isn't configured\n");
goto out;
@@ -978,115 +954,67 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_tunn_params tunn_params;
- u16 t_port = ntohs(ti->port);
+ struct udp_tunnel_info ti;
+ u16 *save_port;
int rc;
memset(&tunn_params, 0, sizeof(tunn_params));
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!edev->dev_info.common.vxlan_enable)
- return;
-
- if (edev->vxlan_dst_port)
- return;
-
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
tunn_params.update_vxlan_port = 1;
- tunn_params.vxlan_port = t_port;
-
- __qede_lock(edev);
- rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
- __qede_unlock(edev);
-
- if (!rc) {
- edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
- t_port);
- } else {
- DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
- t_port);
- }
-
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!edev->dev_info.common.geneve_enable)
- return;
-
- if (edev->geneve_dst_port)
- return;
-
+ tunn_params.vxlan_port = ntohs(ti.port);
+ save_port = &edev->vxlan_dst_port;
+ } else {
tunn_params.update_geneve_port = 1;
- tunn_params.geneve_port = t_port;
-
- __qede_lock(edev);
- rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
- __qede_unlock(edev);
-
- if (!rc) {
- edev->geneve_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Added geneve port=%d\n", t_port);
- } else {
- DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
- t_port);
- }
-
- break;
- default:
- return;
+ tunn_params.geneve_port = ntohs(ti.port);
+ save_port = &edev->geneve_dst_port;
}
-}
-
-void qede_udp_tunnel_del(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct qede_dev *edev = netdev_priv(dev);
- struct qed_tunn_params tunn_params;
- u16 t_port = ntohs(ti->port);
- memset(&tunn_params, 0, sizeof(tunn_params));
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (t_port != edev->vxlan_dst_port)
- return;
-
- tunn_params.update_vxlan_port = 1;
- tunn_params.vxlan_port = 0;
-
- __qede_lock(edev);
- edev->ops->tunn_config(edev->cdev, &tunn_params);
- __qede_unlock(edev);
-
- edev->vxlan_dst_port = 0;
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
- t_port);
-
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (t_port != edev->geneve_dst_port)
- return;
-
- tunn_params.update_geneve_port = 1;
- tunn_params.geneve_port = 0;
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+ if (rc)
+ return rc;
- __qede_lock(edev);
- edev->ops->tunn_config(edev->cdev, &tunn_params);
- __qede_unlock(edev);
+ *save_port = ntohs(ti.port);
+ return 0;
+}
- edev->geneve_dst_port = 0;
+static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
+ .sync_table = qede_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+}, qede_udp_tunnels_vxlan = {
+ .sync_table = qede_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+}, qede_udp_tunnels_geneve = {
+ .sync_table = qede_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
- t_port);
- break;
- default:
- return;
- }
+void qede_set_udp_tunnels(struct qede_dev *edev)
+{
+ if (edev->dev_info.common.vxlan_enable &&
+ edev->dev_info.common.geneve_enable)
+ edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
+ else if (edev->dev_info.common.vxlan_enable)
+ edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
+ else if (edev->dev_info.common.geneve_enable)
+ edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
}
static void qede_xdp_reload_func(struct qede_dev *edev,
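
The ndo_udp_tunnel_add/del callbacks are gone: the udp_tunnel_nic core now owns the port bookkeeping per table and calls the single sync_table callback whenever a table entry changes, and the driver reads the current state back with udp_tunnel_nic_get_port(). An illustration of the table semantics under the combined layout:

/* Illustration only: with qede_udp_tunnels_both installed, table 0
 * holds the single VXLAN port and table 1 the single GENEVE port,
 * so the core invokes
 *
 *   qede_udp_tunnel_sync(dev, 0);    a VXLAN port was added/removed
 *   qede_udp_tunnel_sync(dev, 1);    a GENEVE port was added/removed
 *
 * and udp_tunnel_nic_get_port(dev, table, 0, &ti) reports whatever
 * occupies slot 0 of that table. An empty slot reads back as port 0,
 * which the handler passes straight through and thereby deconfigures
 * the port in hardware, replacing the explicit delete path removed
 * above.
 */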
@@ -1118,9 +1046,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
- return 0;
default:
return -EINVAL;
}
@@ -1130,18 +1055,17 @@ static int qede_set_mcast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
unsigned char *mac, int num_macs)
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_mcast_params mcast;
int i;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_MCAST;
- filter_cmd.filter.mcast.type = opcode;
- filter_cmd.filter.mcast.num = num_macs;
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.type = opcode;
+ mcast.num = num_macs;
for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
- ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+ ether_addr_copy(mcast.mac[i], mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_mcast(edev->cdev, &mcast);
}
int qede_set_mac_addr(struct net_device *ndev, void *p)
@@ -1177,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
goto out;
}
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
if (edev->state != QEDE_STATE_OPEN) {
@@ -1267,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev)
{
enum qed_filter_rx_mode_type accept_flags;
struct qede_dev *edev = netdev_priv(ndev);
- struct qed_filter_params rx_mode;
unsigned char *uc_macs, *temp;
struct netdev_hw_addr *ha;
int rc, uc_count;
@@ -1293,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev)
netif_addr_unlock_bh(ndev);
- /* Configure the struct for the Rx mode */
- memset(&rx_mode, 0, sizeof(struct qed_filter_params));
- rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
/* Remove all previous unicast secondary macs and multicast macs
* (configure / leave the primary mac)
*/
@@ -1344,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev)
qede_config_accept_any_vlan(edev, false);
}
- rx_mode.filter.accept_flags = accept_flags;
- edev->ops->filter_config(edev->cdev, &rx_mode);
+ edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
out:
kfree(uc_macs);
}
@@ -1746,7 +1664,8 @@ unlock:
}
static int qede_parse_actions(struct qede_dev *edev,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
int i;
@@ -1756,6 +1675,9 @@ static int qede_parse_actions(struct qede_dev *edev,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -1785,8 +1707,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- if ((match.key->src && match.mask->src != U16_MAX) ||
- (match.key->dst && match.mask->dst != U16_MAX)) {
+ if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
+ (match.key->dst && match.mask->dst != htons(U16_MAX))) {
DP_NOTICE(edev, "Do not support ports masks\n");
return -EINVAL;
}
@@ -1838,8 +1760,8 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
- if ((match.key->src && match.mask->src != U32_MAX) ||
- (match.key->dst && match.mask->dst != U32_MAX)) {
+ if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
+ (match.key->dst && match.mask->dst != htonl(U32_MAX))) {
DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
return -EINVAL;
}
@@ -1970,7 +1892,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action))
+ if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -2038,7 +1960,7 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action))
+ if (qede_parse_actions(edev, flow_action, NULL))
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index c6c20776b474..7c2af482192d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1,40 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
@@ -284,11 +260,9 @@ static int map_frag_to_bd(struct qede_tx_queue *txq,
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
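
The removed pointer arithmetic computed the full header length (link + network + transport) by hand. The helpers replacing it were added to <linux/tcp.h> in v5.19; paraphrased from the upstream definitions (a sketch, not this driver's code):

static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	/* skb_transport_offset() == skb_transport_header(skb) - skb->data,
	 * so this matches the open-coded expression removed above.
	 */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}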
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
@@ -327,51 +301,89 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
wmb();
}
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
- struct sw_rx_data *metadata, u16 padding, u16 length)
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+ u16 len, struct page *page, struct xdp_frame *xdpf)
{
- struct qede_tx_queue *txq = fp->xdp_tx;
- struct eth_tx_1st_bd *first_bd;
- u16 idx = txq->sw_tx_prod;
+ struct eth_tx_1st_bd *bd;
+ struct sw_tx_xdp *xdp;
u16 val;
- if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+ if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+ txq->num_tx_buffers)) {
txq->stopped_cnt++;
return -ENOMEM;
}
- first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
- memset(first_bd, 0, sizeof(*first_bd));
- first_bd->data.bd_flags.bitfields =
- BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+ bd = qed_chain_produce(&txq->tx_pbl);
+ bd->data.nbds = 1;
+ bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
- val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
- first_bd->data.bitfields |= cpu_to_le16(val);
- first_bd->data.nbds = 1;
+ bd->data.bitfields = cpu_to_le16(val);
/* We can safely ignore the offset, as it's 0 for XDP */
- BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+ BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
- /* Synchronize the buffer back to device, as program [probably]
- * has changed it.
- */
- dma_sync_single_for_device(&edev->pdev->dev,
- metadata->mapping + padding,
- length, PCI_DMA_TODEVICE);
+ xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+ xdp->mapping = dma;
+ xdp->page = page;
+ xdp->xdpf = xdpf;
- txq->sw_tx_ring.xdp[idx].page = metadata->data;
- txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
- /* Mark the fastpath for future XDP doorbell */
- fp->xdp_xmit = 1;
-
return 0;
}
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct device *dmadev = &edev->pdev->dev;
+ struct qede_tx_queue *xdp_tx;
+ struct xdp_frame *xdpf;
+ dma_addr_t mapping;
+ int i, nxmit = 0;
+ u16 xdp_prod;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (unlikely(!netif_running(dev)))
+ return -ENETDOWN;
+
+ i = smp_processor_id() % edev->total_xdp_queues;
+ xdp_tx = edev->fp_array[i].xdp_tx;
+
+ spin_lock(&xdp_tx->xdp_tx_lock);
+
+ for (i = 0; i < n_frames; i++) {
+ xdpf = frames[i];
+
+ mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dmadev, mapping)))
+ break;
+
+ if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
+ NULL, xdpf)))
+ break;
+ nxmit++;
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
+
+ xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(xdp_tx);
+ }
+
+ spin_unlock(&xdp_tx->xdp_tx_lock);
+
+ return nxmit;
+}
+
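
qede_xdp_transmit() follows the reworked .ndo_xdp_xmit contract: return the number of frames actually queued and leave the rejected tail to the caller, which is why the loop breaks on error without freeing anything. A hedged sketch of the caller side (simplified; local names are illustrative):

static int xdp_flush_bulk_sketch(struct net_device *dev,
				 struct xdp_frame **frames, int n)
{
	int sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames,
						 XDP_XMIT_FLUSH);

	if (sent < 0)
		sent = 0;	/* whole bulk rejected */

	/* The core, not the driver, frees whatever was not accepted. */
	for (int i = sent; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);

	return sent;
}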
int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -387,20 +399,31 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- u16 hw_bd_cons, idx;
+ struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+ struct device *dev = &edev->pdev->dev;
+ struct xdp_frame *xdpf;
+ u16 hw_bd_cons;
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- qed_chain_consume(&txq->tx_pbl);
- idx = txq->sw_tx_cons;
+ xdp_info = xdp_arr + txq->sw_tx_cons;
+ xdpf = xdp_info->xdpf;
+
+ if (xdpf) {
+ dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
+ DMA_TO_DEVICE);
+ xdp_return_frame(xdpf);
- dma_unmap_page(&edev->pdev->dev,
- txq->sw_tx_ring.xdp[idx].mapping,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(txq->sw_tx_ring.xdp[idx].page);
+ xdp_info->xdpf = NULL;
+ } else {
+ dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(xdp_info->page);
+ }
+ qed_chain_consume(&txq->tx_pbl);
txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
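
Why the new branch in qede_xdp_tx_int(): the ring now carries buffers from two different producers, summarized below as a descriptive annotation.

/* Completion distinguishes the two Tx producers:
 *
 *	xdp_info->xdpf != NULL: the BD came from qede_xdp_transmit(),
 *	so undo the dma_map_single() of the exact frame length and
 *	hand the frame back with xdp_return_frame();
 *
 *	xdp_info->xdpf == NULL: the BD was a local XDP_TX of a whole
 *	Rx page, so dma_unmap_page(PAGE_SIZE) and __free_page() as
 *	before.
 */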
@@ -723,6 +746,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
buf = page_address(bd->data) + bd->page_offset;
skb = build_skb(buf, rxq->rx_buf_seg_size);
+ if (unlikely(!skb))
+ return NULL;
+
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -1061,19 +1087,11 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data_hard_start = page_address(bd->data);
- xdp.data = xdp.data_hard_start + *data_offset;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.rxq = &rxq->xdp_rxq;
+ xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+ *len, false);
- /* Queues always have a full reset currently, so for the time
- * being until there's atomic program replace just mark read
- * side for map helpers.
- */
- rcu_read_lock();
act = bpf_prog_run_xdp(prog, &xdp);
- rcu_read_unlock();
/* Recalculate, as XDP might have changed the headers */
*data_offset = xdp.data - xdp.data_hard_start;
@@ -1088,32 +1106,59 @@ static bool qede_rx_xdp(struct qede_dev *edev,
switch (act) {
case XDP_TX:
/* We need the replacement buffer before transmit. */
- if (qede_alloc_rx_buffer(rxq, true)) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
qede_recycle_rx_bd_ring(rxq, 1);
+
trace_xdp_exception(edev->ndev, prog, act);
- return false;
+ break;
}
/* Now if there's a transmission problem, we'd still have to
* throw current buffer, as replacement was already allocated.
*/
- if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
- dma_unmap_page(rxq->dev, bd->mapping,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+ *data_offset, *len, bd->data,
+ NULL))) {
+ dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+ rxq->data_direction);
__free_page(bd->data);
+
trace_xdp_exception(edev->ndev, prog, act);
+ } else {
+ dma_sync_single_for_device(rxq->dev,
+ bd->mapping + *data_offset,
+ *len, rxq->data_direction);
+ fp->xdp_xmit |= QEDE_XDP_TX;
}
/* Regardless, we've consumed an Rx BD */
qede_rx_bd_ring_consume(rxq);
- return false;
+ break;
+ case XDP_REDIRECT:
+ /* We need the replacement buffer before transmit. */
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+ trace_xdp_exception(edev->ndev, prog, act);
+ break;
+ }
+
+ dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+ rxq->data_direction);
+
+ if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
+ DP_NOTICE(edev, "Failed to redirect the packet\n");
+ else
+ fp->xdp_xmit |= QEDE_XDP_REDIRECT;
+
+ qede_rx_bd_ring_consume(rxq);
+ break;
default:
- bpf_warn_invalid_xdp_action(act);
- /* Fall through */
+ bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
- /* Fall through */
+ fallthrough;
case XDP_DROP:
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
}
@@ -1160,12 +1205,9 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
dma_unmap_page(rxq->dev, bd->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
- bd->data, rxq->rx_headroom, cur_size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
+ rxq->rx_headroom, cur_size, PAGE_SIZE);
- skb->truesize += PAGE_SIZE;
- skb->data_len += cur_size;
- skb->len += cur_size;
pkt_len -= cur_size;
}
@@ -1377,6 +1419,9 @@ int qede_poll(struct napi_struct *napi, int budget)
napi);
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
+ u16 xdp_prod;
+
+ fp->xdp_xmit = 0;
if (likely(fp->type & QEDE_FASTPATH_TX)) {
int cos;
@@ -1393,7 +1438,8 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
- if (rx_work_done < budget) {
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
napi_complete_done(napi, rx_work_done);
@@ -1404,14 +1450,16 @@ int qede_poll(struct napi_struct *napi, int budget)
}
}
- if (fp->xdp_xmit) {
- u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+ if (fp->xdp_xmit & QEDE_XDP_TX) {
+ xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
- fp->xdp_xmit = 0;
fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
qede_update_tx_producer(fp->xdp_tx);
}
+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush_map();
+
return rx_work_done;
}
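
fp->xdp_xmit changed from a boolean into a bitmask so both kinds of XDP work can be flushed exactly once per NAPI cycle instead of per packet. The pattern, condensed (flag names as added to qede.h in this series):

fp->xdp_xmit = 0;			/* reset on poll entry */

/* Rx path, per packet: */
fp->xdp_xmit |= QEDE_XDP_TX;		/* after queueing a local XDP_TX */
fp->xdp_xmit |= QEDE_XDP_REDIRECT;	/* after a successful redirect */

/* Poll exit, once: */
if (fp->xdp_xmit & QEDE_XDP_TX)
	qede_update_tx_producer(fp->xdp_tx);	/* one doorbell per poll */
if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
	xdp_do_flush_map();		/* let destination drivers kick */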
@@ -1597,6 +1645,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
+ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
@@ -1737,6 +1792,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device, or at least not natively
+ * offloadable; offloads for them can't be done trivially, so disable them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 34fa3917eb33..953f304b8588 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1,37 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
+#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -60,15 +35,12 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
+#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
-static char version[] =
- "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
-
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static uint debug;
module_param(debug, uint, 0);
@@ -124,6 +96,8 @@ static const struct pci_device_id qede_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
#define TX_TIMEOUT (5 * HZ)
@@ -135,10 +109,12 @@ static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -163,9 +139,7 @@ static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
struct qede_dev *edev = netdev_priv(ndev);
- DP_VERBOSE(edev, QED_MSG_IOV,
- "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
if (!is_valid_ether_addr(mac)) {
DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
@@ -203,6 +177,10 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
}
#endif
+static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+};
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
@@ -212,6 +190,7 @@ static struct pci_driver qede_pci_driver = {
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
+ .err_handler = &qede_err_handler,
};
static struct qed_eth_cb_ops qede_ll_ops = {
@@ -221,6 +200,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
#endif
.link_update = qede_link_update,
.schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -274,7 +254,9 @@ int __init qede_init(void)
{
int ret;
- pr_info("qede_init: %s\n", version);
+ pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
+
+ qede_forced_speed_maps_init();
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
@@ -527,6 +509,112 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
+{
+ char *p_sb = (char *)fp->sb_info->sb_virt;
+ u32 sb_size, i;
+
+ sb_size = sizeof(struct status_block);
+
+ for (i = 0; i < sb_size; i += 8)
+ DP_NOTICE(edev,
+ "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
+ p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
+ p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
+}
+
+static void
+qede_txq_fp_log_metadata(struct qede_dev *edev,
+ struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_chain *p_chain = &txq->tx_pbl;
+
+ /* Dump txq/fp/sb ids and other metadata */
+ DP_NOTICE(edev,
+ "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
+ fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
+ p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
+
+ /* Dump all the relevant prod/cons indexes */
+ DP_NOTICE(edev,
+ "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
+ le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
+ qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
+}
+
+static void
+qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_sb_info_dbg sb_dbg;
+ int rc;
+
+ /* sb info */
+ qede_fp_sb_dump(edev, fp);
+
+ memset(&sb_dbg, 0, sizeof(sb_dbg));
+ rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
+
+ DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
+ sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
+
+ /* report to mfw */
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
+ if (!rc)
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
+ txq->index, fp->sb_info->igu_sb_id,
+ sb_dbg.igu_prod, sb_dbg.igu_cons,
+ sb_dbg.pi[TX_PI(txq->cos)]);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ for_each_queue(i) {
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ int cos;
+
+ fp = &edev->fp_array[i];
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &fp->txq[cos];
+
+ /* Dump basic metadata for all queues */
+ qede_txq_fp_log_metadata(edev, fp, txq);
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, fp, txq);
+ }
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
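
The new .ndo_tx_timeout is invoked by the stack's dev_watchdog when a Tx queue stays stalled longer than the device watchdog period; TX_TIMEOUT (5 * HZ, defined earlier in this file) is assumed to be wired up as follows (a sketch; the assignment itself lives in qede_init_ndev(), outside this hunk):

/* In qede_init_ndev() -- hedged sketch of the wiring: */
ndev->netdev_ops = &qede_netdev_ops;	/* .ndo_tx_timeout = qede_tx_timeout */
ndev->watchdog_timeo = TX_TIMEOUT;	/* stall threshold per Tx queue */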
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -605,78 +693,75 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
}
static const struct net_device_ops qede_netdev_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_do_ioctl = qede_ioctl,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_eth_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
- .ndo_set_vf_mac = qede_set_vf_mac,
- .ndo_set_vf_vlan = qede_set_vf_vlan,
- .ndo_set_vf_trust = qede_set_vf_trust,
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+ .ndo_set_vf_trust = qede_set_vf_trust,
#endif
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
- .ndo_set_vf_link_state = qede_set_vf_link_state,
- .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
- .ndo_get_vf_config = qede_get_vf_config,
- .ndo_set_vf_rate = qede_set_vf_rate,
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = qede_udp_tunnel_add,
- .ndo_udp_tunnel_del = qede_udp_tunnel_del,
- .ndo_features_check = qede_features_check,
- .ndo_bpf = qede_xdp,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = qede_rx_flow_steer,
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
- .ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_xdp_xmit = qede_xdp_transmit,
+ .ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = qede_udp_tunnel_add,
- .ndo_udp_tunnel_del = qede_udp_tunnel_del,
- .ndo_features_check = qede_features_check,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_features_check = qede_features_check,
};
static const struct net_device_ops qede_netdev_vf_xdp_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = qede_udp_tunnel_add,
- .ndo_udp_tunnel_del = qede_udp_tunnel_del,
- .ndo_features_check = qede_features_check,
- .ndo_bpf = qede_xdp,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
+ .ndo_xdp_xmit = qede_xdp_transmit,
};
/* -------------------------------------------------------------------------
@@ -707,8 +792,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->dp_module = dp_module;
edev->dp_level = dp_level;
edev->ops = qed_ops;
- edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
- edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
info->num_queues, info->num_queues);
@@ -763,7 +854,7 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ if (edev->dev_info.common.b_arfs_capable)
hw_features |= NETIF_F_NTUPLE;
if (edev->dev_info.common.vxlan_enable ||
@@ -783,6 +874,8 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_GSO_UDP_TUNNEL_CSUM);
ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM);
+
+ qede_set_udp_tunnels(edev);
}
if (edev->dev_info.common.gre_enable) {
@@ -804,7 +897,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
/* Set network device HW mac */
- ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+ eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
ndev->mtu = edev->dev_info.common.mtu;
}
@@ -867,6 +960,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
{
u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
+ void *mem;
int i;
edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
@@ -876,6 +970,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
+ mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
+ sizeof(*edev->coal_entry), GFP_KERNEL);
+ if (!mem) {
+ DP_ERR(edev, "coalesce entry allocation failed\n");
+ kfree(edev->coal_entry);
+ goto err;
+ }
+ edev->coal_entry = mem;
+
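
krealloc() keeps realloc() semantics: on failure it returns NULL and leaves the old buffer allocated, so assigning through the mem temporary is what makes the explicit kfree() in the error path both necessary and sufficient. The same shape in portable C (illustrative only):

#include <stdlib.h>

static int grow_sketch(int **arr, size_t n)
{
	int *tmp = realloc(*arr, n * sizeof(**arr));

	if (!tmp) {
		free(*arr);	/* old buffer survives failure; free it */
		*arr = NULL;
		return -1;
	}

	*arr = tmp;
	return 0;
}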
fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
/* Allocate the FP elements for Rx queues followed by combined and then
@@ -963,6 +1066,13 @@ static void qede_sp_task(struct work_struct *work)
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ /* Disable execution of this deferred work once
+ * qede removal is in progress; this stops any future
+ * scheduling of sp_task.
+ */
+ if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+ return;
+
/* The locking scheme depends on the specific flag:
* In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
* ensure that ongoing flows are ended and new ones are not started.
@@ -974,7 +1084,8 @@ static void qede_sp_task(struct work_struct *work)
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
*/
- qede_sriov_configure(edev->pdev, 0);
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
#endif
qede_lock(edev);
qede_recovery_handler(edev);
@@ -993,7 +1104,20 @@ static void qede_sp_task(struct work_struct *work)
qede_process_arfs_filters(edev, false);
}
#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
__qede_unlock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ edev->ops->common->recovery_process(edev->cdev);
+ }
}
static void qede_update_pf_params(struct qed_dev *cdev)
@@ -1090,11 +1214,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* Start the Slowpath-process */
memset(&sp_params, 0, sizeof(sp_params));
sp_params.int_mode = QED_INT_MODE_MSIX;
- sp_params.drv_major = QEDE_MAJOR_VERSION;
- sp_params.drv_minor = QEDE_MINOR_VERSION;
- sp_params.drv_rev = QEDE_REVISION_VERSION;
- sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
- strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
@@ -1113,10 +1233,21 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
rc = -ENOMEM;
goto err2;
}
+
+ edev->devlink = qed_ops->common->devlink_register(cdev);
+ if (IS_ERR(edev->devlink)) {
+ DP_NOTICE(edev, "Cannot register devlink\n");
+ rc = PTR_ERR(edev->devlink);
+ edev->devlink = NULL;
+ goto err3;
+ }
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qed_devlink *qdl;
edev = netdev_priv(ndev);
+ qdl = devlink_priv(edev->devlink);
+ qdl->cdev = cdev;
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1151,7 +1282,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+ qede_ptp_enable(edev);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -1168,7 +1299,10 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
err4:
qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
- free_netdev(edev->ndev);
+ if (mode != QEDE_PROBE_RECOVERY)
+ free_netdev(edev->ndev);
+ else
+ edev->cdev = NULL;
err2:
qed_ops->common->slowpath_stop(cdev);
err1:
@@ -1224,6 +1358,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
if (mode != QEDE_REMOVE_RECOVERY) {
+ set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
@@ -1239,7 +1374,13 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
+
+ if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
+ qed_ops->common->devlink_unregister(edev->devlink);
+ edev->devlink = NULL;
+ }
qed_ops->common->remove(cdev);
+ edev->cdev = NULL;
/* Since this can happen out-of-sync with other flows,
* don't release the netdevice until after slowpath stop
@@ -1247,8 +1388,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
- if (mode != QEDE_REMOVE_RECOVERY)
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ kfree(edev->coal_entry);
free_netdev(ndev);
+ }
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -1313,7 +1456,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -1384,6 +1527,11 @@ static void qede_set_tpa_param(struct qede_rx_queue *rxq)
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
+ struct qed_chain_init_params params = {
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = RX_RING_SIZE,
+ };
+ struct qed_dev *cdev = edev->cdev;
int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -1398,7 +1546,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
if (rxq->rx_buf_size + size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts ,
+ /* Segment size to split a page in multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
if (!edev->xdp_prog) {
@@ -1419,24 +1567,20 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
/* Allocate FW Rx ring */
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_NEXT_PTR,
- QED_CHAIN_CNT_TYPE_U16,
- RX_RING_SIZE,
- sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring, NULL);
+ params.mode = QED_CHAIN_MODE_NEXT_PTR;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.elem_size = sizeof(struct eth_rx_bd);
+
+ rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
if (rc)
goto err;
/* Allocate FW completion ring */
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- RX_RING_SIZE,
- sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring, NULL);
+ params.mode = QED_CHAIN_MODE_PBL;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME;
+ params.elem_size = sizeof(union eth_rx_cqe);
+
+ rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
if (rc)
goto err;
@@ -1473,7 +1617,13 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- union eth_tx_bd_types *p_virt;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = edev->q_num_tx_buffers,
+ .elem_size = sizeof(union eth_tx_bd_types),
+ };
int size, rc;
txq->num_tx_buffers = edev->q_num_tx_buffers;
@@ -1491,13 +1641,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
goto err;
}
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- txq->num_tx_buffers,
- sizeof(*p_virt),
- &txq->tx_pbl, NULL);
+ rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
if (rc)
goto err;
@@ -1653,6 +1797,7 @@ static void qede_init_fp(struct qede_dev *edev)
{
int queue_id, rxq_index = 0, txq_index = 0;
struct qede_fastpath *fp;
+ bool init_xdp = false;
for_each_queue(queue_id) {
fp = &edev->fp_array[queue_id];
@@ -1664,6 +1809,9 @@ static void qede_init_fp(struct qede_dev *edev)
fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
rxq_index);
fp->xdp_tx->is_xdp = 1;
+
+ spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
+ init_xdp = true;
}
if (fp->type & QEDE_FASTPATH_RX) {
@@ -1678,7 +1826,14 @@ static void qede_init_fp(struct qede_dev *edev)
/* Driver has no error path from here */
WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
- fp->rxq->rxq_id) < 0);
+ fp->rxq->rxq_id, 0) < 0);
+
+ if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0,
+ NULL)) {
+ DP_NOTICE(edev,
+ "Failed to register XDP memory model\n");
+ }
}
if (fp->type & QEDE_FASTPATH_TX) {
@@ -1694,7 +1849,7 @@ static void qede_init_fp(struct qede_dev *edev)
txq->ndev_txq_id = ndev_tx_id;
if (edev->dev_info.is_legacy)
- txq->is_legacy = 1;
+ txq->is_legacy = true;
txq->dev = &edev->pdev->dev;
}
@@ -1704,6 +1859,11 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, queue_id);
}
+
+ if (init_xdp) {
+ edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
+ DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
+ }
}
static int qede_set_real_num_queues(struct qede_dev *edev)
@@ -1744,8 +1904,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
/* Add NAPI objects */
for_each_queue(i) {
- netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
- qede_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
napi_enable(&edev->fp_array[i].napi);
}
}
@@ -1756,7 +1915,6 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
for (i = 0; i < edev->int_info.used_cnt; i++) {
if (edev->int_info.msix_cnt) {
- synchronize_irq(edev->int_info.msix[i].vector);
free_irq(edev->int_info.msix[i].vector,
&edev->fp_array[i]);
} else {
@@ -1765,6 +1923,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
}
edev->int_info.used_cnt = 0;
+ edev->int_info.msix_cnt = 0;
}
static int qede_req_msix_irqs(struct qede_dev *edev)
@@ -1797,6 +1956,12 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
&edev->fp_array[i]);
if (rc) {
DP_ERR(edev, "Request fp %d irq failed\n", i);
+#ifdef CONFIG_RFS_ACCEL
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+#endif
qede_sync_free_irqs(edev);
return rc;
}
@@ -2189,6 +2354,15 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
rc = qede_stop_queues(edev);
if (rc) {
+#ifdef CONFIG_RFS_ACCEL
+ if (edev->dev_info.common.b_arfs_capable) {
+ qede_poll_for_freeing_arfs_filters(edev);
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+ }
+#endif
qede_sync_free_irqs(edev);
goto out;
}
@@ -2199,7 +2373,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ if (edev->dev_info.common.b_arfs_capable) {
qede_poll_for_freeing_arfs_filters(edev);
qede_free_arfs(edev);
}
@@ -2238,8 +2412,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
+ struct ethtool_coalesce coal = {};
u8 num_tc;
- int rc;
+ int rc, i;
DP_INFO(edev, "Starting qede load\n");
@@ -2266,10 +2441,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
- rc = qede_alloc_arfs(edev);
- if (rc)
- DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ if (qede_alloc_arfs(edev)) {
+ edev->ndev->features &= ~NETIF_F_NTUPLE;
+ edev->dev_info.common.b_arfs_capable = false;
}
qede_napi_add_enable(edev);
@@ -2301,12 +2475,23 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
edev->state = QEDE_STATE_OPEN;
+ coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
+ coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
+
+ for_each_queue(i) {
+ if (edev->coal_entry[i].isvalid) {
+ coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
+ coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
+ }
+ __qede_unlock(edev);
+ qede_set_per_coalesce(edev->ndev, i, &coal);
+ __qede_lock(edev);
+ }
DP_INFO(edev, "Ending successfully qede load\n");
goto out;
err4:
qede_sync_free_irqs(edev);
- memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
qede_napi_disable_remove(edev);
err2:
@@ -2367,7 +2552,7 @@ static int qede_open(struct net_device *ndev)
if (rc)
return rc;
- udp_tunnel_get_rx_info(ndev);
+ udp_tunnel_nic_reset_ntf(ndev);
edev->ops->common->update_drv_state(edev->cdev, true);
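
udp_tunnel_get_rx_info() was part of the old per-driver tunnel-port callbacks (.ndo_udp_tunnel_add/del, removed from the netdev ops earlier in this patch); udp_tunnel_nic_reset_ntf() instead asks the udp_tunnel_nic core to replay the port table. The registration performed by qede_set_udp_tunnels() looks roughly like this (a hedged sketch; the real tables and sync callback live in qede_filter.c):

static const struct udp_tunnel_nic_info tunnels_sketch = {
	.sync_table	= qede_udp_tunnel_sync,	/* assumed callback name */
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

/* ...at probe time: edev->ndev->udp_tunnel_nic_info = &tunnels_sketch; */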
@@ -2380,7 +2565,8 @@ static int qede_close(struct net_device *ndev)
qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
- edev->ops->common->update_drv_state(edev->cdev, false);
+ if (edev->cdev)
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -2469,7 +2655,7 @@ static void qede_recovery_handler(struct qede_dev *edev)
goto err;
qede_config_rx_mode(edev->ndev);
- udp_tunnel_get_rx_info(edev->ndev);
+ udp_tunnel_nic_reset_ntf(edev->ndev);
}
edev->state = curr_state;
@@ -2482,6 +2668,98 @@ err:
qede_recovery_failed(edev);
}
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ if (edev->devlink) {
+ DP_NOTICE(edev, "Reporting fatal error to devlink\n");
+ edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
+ }
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ /* Mark this error as recoverable and start recovery */
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ edev->last_err_type = err_type;
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
+}
+
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
@@ -2505,8 +2783,8 @@ static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
data->feat_flags |= QED_TLV_LSO;
ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
- memset(data->mac[1], 0, ETH_ALEN);
- memset(data->mac[2], 0, ETH_ALEN);
+ eth_zero_addr(data->mac[1]);
+ eth_zero_addr(data->mac[2]);
/* Copy the first two UC macs */
netif_addr_lock_bh(edev->ndev);
i = 1;
@@ -2579,3 +2857,52 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
etlv->num_txqs_full_set = true;
etlv->num_rxqs_full_set = true;
}
+
+/**
+ * qede_io_error_detected(): Called when PCI error is detected
+ *
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * Return: pci_ers_result_t.
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev)
+ return PCI_ERS_RESULT_NONE;
+
+ DP_NOTICE(edev, "IO error detected [%d]\n", state);
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev, "Device already in the recovery state\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* PF handles the recovery of its VFs */
+ if (IS_VF(edev)) {
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "VF recovery is handled by its PF\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ set_bit(QEDE_SP_AER, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ __qede_unlock(edev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
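
Returning PCI_ERS_RESULT_CAN_RECOVER tells the AER core no slot reset is required; qede implements only .error_detected and finishes the job itself from the QEDE_SP_AER branch of qede_sp_task() added above. The core's dispatch, paraphrased from Documentation/PCI/pci-error-recovery.rst:

/* AER core sequence (simplified annotation):
 *
 *	ret = err_handler->error_detected(pdev, state);
 *	PCI_ERS_RESULT_CAN_RECOVER -> core may call ->mmio_enabled()
 *	PCI_ERS_RESULT_NEED_RESET  -> core performs a slot reset,
 *				      then calls ->slot_reset()
 *	PCI_ERS_RESULT_DISCONNECT  -> device is removed
 *
 * qede answers CAN_RECOVER and schedules its own recovery instead.
 */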
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 4c7f7a7fc151..c9c8225f04d6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#include "qede_ptp.h"
#define QEDE_PTP_TX_TIMEOUT (2 * HZ)
@@ -53,12 +28,12 @@ struct qede_ptp {
};
/**
- * qede_ptp_adjfreq
- * @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter.
*
- * Adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * @info: The PTP clock info structure.
+ * @ppb: Parts per billion adjustment from base.
+ *
+ * Return: Zero on success, negative errno otherwise.
*/
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
@@ -329,11 +304,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
"HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- DP_ERR(edev, "config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
ptp->hw_ts_ioctl_called = 1;
ptp->tx_type = config.tx_type;
ptp->rx_filter = config.rx_filter;
@@ -412,6 +382,7 @@ void qede_ptp_disable(struct qede_dev *edev)
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
}
/* Disable PTP in HW */
@@ -423,7 +394,7 @@ void qede_ptp_disable(struct qede_dev *edev)
edev->ptp = NULL;
}
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
@@ -444,25 +415,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
/* Init work queue for Tx timestamping */
INIT_WORK(&ptp->work, qede_ptp_task);
- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
+ /* Init cyclecounter and timecounter */
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
- return rc;
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+ return 0;
}
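
With mult = 1 and shift = 0 the cyclecounter scaling is the identity, i.e. the hardware counter is treated as ticking once per nanosecond. The conversion the timecounter core applies is essentially (paraphrased from kernel/time/timecounter.c, ignoring the fractional remainder):

static inline u64 cc_cyc2ns_sketch(u64 delta, u32 mult, u32 shift)
{
	return (delta * mult) >> shift;	/* identity for mult=1, shift=0 */
}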
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
@@ -483,7 +448,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
edev->ptp = ptp;
- rc = qede_ptp_init(edev, init_tc);
+ rc = qede_ptp_init(edev);
if (rc)
goto err1;
@@ -531,19 +496,19 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
&edev->flags)) {
- DP_ERR(edev, "Timestamping in progress\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
edev->ptp_skip_txts++;
return;
}
if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
- DP_ERR(edev,
- "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Tx timestamping was not enabled, this pkt will not be timestamped\n");
clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
edev->ptp_skip_txts++;
} else if (unlikely(ptp->tx_skb)) {
- DP_ERR(edev,
- "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Device supports a single outstanding pkt to ts, It will not be ts\n");
clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
edev->ptp_skip_txts++;
} else {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index 691a14c4b2c5..1db0f021c645 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef _QEDE_PTP_H_
#define _QEDE_PTP_H_
@@ -41,7 +16,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 2d873ae8a234..6304514a6f2c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
@@ -105,6 +80,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
qede_rdma_cleanup_event(edev);
destroy_workqueue(edev->rdma_info.rdma_wq);
+ edev->rdma_info.rdma_wq = NULL;
}
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
@@ -258,16 +234,23 @@ static void qede_rdma_changeaddr(struct qede_dev *edev)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
+static void qede_rdma_change_mtu(struct qede_dev *edev)
+{
+ if (qede_rdma_supported(edev)) {
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev,
+ QEDE_CHANGE_MTU);
+ }
+}
+
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
struct qede_rdma_event_work *event_node = NULL;
- struct list_head *list_node = NULL;
bool found = false;
- list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
- event_node = list_entry(list_node, struct qede_rdma_event_work,
- list);
+ list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
+ list) {
if (!work_pending(&event_node->work)) {
found = true;
break;
@@ -311,6 +294,9 @@ static void qede_rdma_handle_event(struct work_struct *work)
case QEDE_CHANGE_ADDR:
qede_rdma_changeaddr(edev);
break;
+ case QEDE_CHANGE_MTU:
+ qede_rdma_change_mtu(edev);
+ break;
default:
DP_NOTICE(edev, "Invalid rdma event %d", event);
}
@@ -325,7 +311,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
if (edev->rdma_info.exp_recovery)
return;
- if (!edev->rdma_info.qedr_dev)
+ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
return;
/* We don't want the cleanup flow to start while we're allocating and
@@ -362,3 +348,8 @@ void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
+
+void qede_rdma_event_change_mtu(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
+}