 drivers/net/ethernet/intel/ice/ice_main.c | 733 ++++++++++++++++++++++------
 1 file changed, 608 insertions(+), 125 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5b190c257124..082825e3cb39 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,6 +8,7 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
@@ -133,38 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
static int ice_init_mac_fltr(struct ice_pf *pf)
{
enum ice_status status;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
+ u8 *perm_addr;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return -EINVAL;
- /* To add a MAC filter, first add the MAC to a list and then
- * pass the list to ice_add_mac.
- */
-
- /* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
- if (status)
- goto unregister;
-
- /* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list as well.
- */
- eth_broadcast_addr(broadcast);
- status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
- if (status)
- goto unregister;
+ perm_addr = vsi->port_info->mac.perm_addr;
+ status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
+ if (!status)
+ return 0;
- return 0;
-unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
*/
- if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
- status);
+ if (vsi->netdev->reg_state == NETREG_REGISTERED) {
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
+ ice_stat_str(status));
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
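The two separate adds, unicast then broadcast, collapse into one call into the new ice_fltr layer. A minimal sketch of what ice_fltr_add_mac_and_broadcast() presumably does, following the list-then-add pattern described in the removed comment; the body is an assumption, only the helper names are taken from this patch:

	enum ice_status
	ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
				       enum ice_sw_fwd_act_type act)
	{
		u8 broadcast[ETH_ALEN];
		LIST_HEAD(tmp_list);
		enum ice_status status;

		eth_broadcast_addr(broadcast);
		/* queue both addresses on one list, program them in one shot */
		if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, act) ||
		    ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, act))
			return ICE_ERR_NO_MEMORY;

		status = ice_fltr_add_mac_list(vsi, &tmp_list);
		ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
		return status;
	}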
@@ -188,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -209,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -307,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
- ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
+ status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
@@ -319,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Add MAC addresses in the sync list */
- status = ice_add_mac(hw, &vsi->tmp_sync_list);
- ice_free_fltr_list(dev, &vsi->tmp_sync_list);
+ status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If filter is added successfully or already exists, do not go into
* 'if' condition and report it as error. Instead continue processing
* rest of the function.
@@ -357,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
- } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ } else {
+ /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
@@ -462,7 +452,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -1017,8 +1007,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %d\n", qtype,
- ret);
+ dev_err(dev, "%s Receive Queue event error %s\n", qtype,
+ ice_stat_str(ret));
break;
}
@@ -1123,7 +1113,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
*
* If not already scheduled, this puts the task into the work queue.
*/
-static void ice_service_task_schedule(struct ice_pf *pf)
+void ice_service_task_schedule(struct ice_pf *pf)
{
if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
!test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
@@ -1198,8 +1188,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ unsigned int i;
u32 reg;
- int i;
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
@@ -1332,8 +1322,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* PF can be configured to reset the VF through ethtool
* private flag mdd-auto-reset-vf.
*/
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ ice_print_vf_rx_mdd_event(vf);
ice_reset_vf(&pf->vf[i], false);
+ }
}
}
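The mdd-auto-reset-vf knob mentioned in the comment is an ethtool private flag; assuming the standard private-flags interface (the interface name below is a placeholder), it is enabled with:

	ethtool --set-priv-flags eth0 mdd-auto-reset-vf on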
@@ -1493,7 +1488,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
-
+ ice_sync_arfs_fltrs(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1652,9 +1647,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
- q_vector->affinity_notify.notify = ice_irq_affinity_notify;
- q_vector->affinity_notify.release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
+ struct irq_affinity_notify *affinity_notify;
+
+ affinity_notify = &q_vector->affinity_notify;
+ affinity_notify->notify = ice_irq_affinity_notify;
+ affinity_notify->release = ice_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, affinity_notify);
+ }
/* assign the mask for this irq */
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
@@ -1666,8 +1666,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector) {
vector--;
- irq_num = pf->msix_entries[base + vector].vector,
- irq_set_affinity_notifier(irq_num, NULL);
+ irq_num = pf->msix_entries[base + vector].vector;
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
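The affinity notifier is skipped under CONFIG_RFS_ACCEL because each IRQ has a single notifier slot and accelerated RFS claims it through the CPU reverse map. A minimal sketch of what ice_set_cpu_rx_rmap(), called later in this patch, presumably does with the generic cpu_rmap API (the body is an assumption):

	#include <linux/cpu_rmap.h>

	static int ice_set_cpu_rx_rmap_sketch(struct ice_vsi *vsi)
	{
		struct net_device *netdev = vsi->netdev;
		u16 i;

		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;

		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq = vsi->back->msix_entries[vsi->base_vector + i].vector;

			/* irq_cpu_rmap_add() installs the rmap's own affinity
			 * notifier, which is why the driver must not register
			 * one of its own
			 */
			if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, irq)) {
				free_irq_cpu_rmap(netdev->rx_cpu_rmap);
				netdev->rx_cpu_rmap = NULL;
				return -EINVAL;
			}
		}
		return 0;
	}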
@@ -1809,8 +1810,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
- status);
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
+ ice_stat_str(status));
goto clear_xdp_rings;
}
ice_vsi_assign_bpf_prog(vsi, prog);
@@ -1898,6 +1899,9 @@ free_qmap:
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
+ /* change number of XDP Tx queues to 0 */
+ vsi->num_xdp_txq = 0;
+
return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
}
@@ -1931,7 +1935,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
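Note num_xdp_txq is now sized from alloc_rxq rather than alloc_txq: the driver gives every Rx ring its own XDP Tx ring so XDP_TX verdicts can be transmitted without locking against other Rx rings. A sketch of the pairing this implies (the xdp_ring field name is an assumption, not taken from this patch):

	/* pair each Rx ring with a dedicated XDP Tx ring */
	ice_for_each_rxq(vsi, i)
		vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];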
@@ -2137,10 +2141,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
- if (!test_bit(__ICE_DOWN, pf->state)) {
- ice_service_task_schedule(pf);
- ice_irq_dynamic_ena(hw, NULL, NULL);
- }
+ ice_service_task_schedule(pf);
+ ice_irq_dynamic_ena(hw, NULL, NULL);
return ret;
}
@@ -2247,7 +2249,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
- pf->oicr_idx = oicr_idx;
+ pf->oicr_idx = (u16)oicr_idx;
err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
@@ -2331,6 +2333,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
+ NETIF_F_NTUPLE |
NETIF_F_RXHASH;
csumo_features = NETIF_F_RXCSUM |
@@ -2342,13 +2345,27 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO |
+ tso_features = NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_L4;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
vlano_features | tso_features;
+ /* add support for HW_CSUM on packets with MPLS header */
+ netdev->mpls_features = NETIF_F_HW_CSUM;
+
/* enable features */
netdev->features |= netdev->hw_features;
/* encap and VLAN devices inherit default, csumo and tso features */
@@ -2411,7 +2428,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- goto err_destroy_devlink_port;
+ goto err_free_netdev;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
@@ -2422,9 +2439,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
return 0;
+err_free_netdev:
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
err_destroy_devlink_port:
ice_devlink_destroy_port(pf);
-
return err;
}
@@ -2457,6 +2476,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
+ * ice_ctrl_vsi_setup - Set up a control VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns pointer to the successfully allocated VSI software struct
+ * on success, otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+}
+
+/**
* ice_lb_vsi_setup - Set up a loopback VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2509,7 +2542,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!ret) {
vsi->vlan_ena = true;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
@@ -2594,12 +2627,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
+ status = ice_set_cpu_rx_rmap(vsi);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+ vsi->vsi_num, status);
+ status = -EINVAL;
+ goto unroll_napi_add;
+ }
status = ice_init_mac_fltr(pf);
if (status)
- goto unroll_napi_add;
+ goto free_cpu_rx_map;
return status;
+free_cpu_rx_map:
+ ice_free_cpu_rx_rmap(vsi);
+
unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
@@ -2630,7 +2673,8 @@ unroll_vsi_setup:
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
- u16 count = 0, bit;
+ unsigned long bit;
+ u16 count = 0;
mutex_lock(lock);
for_each_clear_bit(bit, pf_qmap, size)
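The iterator changes type because for_each_clear_bit() expands to find_first_zero_bit()/find_next_zero_bit(), both of which take and return unsigned long; a u16 iterator risks truncation and sign-compare warnings even though queue counts fit in 16 bits. The resulting loop, for reference:

	unsigned long bit;
	u16 count = 0;

	mutex_lock(lock);
	for_each_clear_bit(bit, pf_qmap, size)
		count++;
	mutex_unlock(lock);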
@@ -2703,6 +2747,23 @@ static void ice_set_pf_caps(struct ice_pf *pf)
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+ clear_bit(ICE_FLAG_FD_ENA, pf->flags);
+ if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
+ u16 unused;
+
+ /* ctrl_vsi_idx will be set to a valid value when flow director
+ * is setup by ice_init_fdir
+ */
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(&pf->hw, &unused,
+ func_caps->fd_fltr_guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(&pf->hw, &unused,
+ func_caps->fd_fltr_best_effort);
+ }
+
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
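ice_set_pf_caps() now claims flow-director filter space up front: the guaranteed pool is reserved for this PF alone, while the best-effort pool is shared across functions. The allocator signatures below are inferred from the call sites above and are not defined in this patch:

	enum ice_status
	ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
	enum ice_status
	ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);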
@@ -2769,6 +2830,15 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_budget += needed;
v_left -= needed;
+ /* reserve one vector for flow director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ needed = ICE_FDIR_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+ }
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2793,8 +2863,10 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors for LAN (traffic + OICR) */
+/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
#define ICE_MIN_LAN_VECS 2
+#define ICE_MIN_RDMA_VECS 2
+#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
if (v_actual < ICE_MIN_LAN_VECS) {
/* error if we can't get minimum vectors */
@@ -2869,8 +2941,8 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/* populate SW interrupts pool with number of OS granted IRQs. */
- pf->num_avail_sw_msix = vectors;
- pf->irq_tracker->num_entries = vectors;
+ pf->num_avail_sw_msix = (u16)vectors;
+ pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
@@ -2902,9 +2974,9 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
}
if (new_tx)
- vsi->req_txq = new_tx;
+ vsi->req_txq = (u16)new_tx;
if (new_rx)
- vsi->req_rxq = new_rx;
+ vsi->req_rxq = (u16)new_rx;
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
@@ -2985,6 +3057,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
+ break;
case ICE_ERR_BUF_TOO_SHORT:
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
@@ -3013,6 +3088,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
return;
default:
break;
@@ -3100,6 +3178,53 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
}
/**
+ * ice_init_fdir - Initialize flow director VSI and configuration
+ * @pf: pointer to the PF instance
+ *
+ * returns 0 on success, negative on error
+ */
+static int ice_init_fdir(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *ctrl_vsi;
+ int err;
+
+ /* Side Band Flow Director needs to have a control VSI.
+ * Allocate it and store it in the PF.
+ */
+ ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "could not create control VSI\n");
+ return -ENOMEM;
+ }
+
+ err = ice_vsi_open_ctrl(ctrl_vsi);
+ if (err) {
+ dev_dbg(dev, "could not open control VSI\n");
+ goto err_vsi_open;
+ }
+
+ mutex_init(&pf->hw.fdir_fltr_lock);
+
+ err = ice_fdir_create_dflt_rules(pf);
+ if (err)
+ goto err_fdir_rule;
+
+ return 0;
+
+err_fdir_rule:
+ ice_fdir_release_flows(&pf->hw);
+ ice_vsi_close(ctrl_vsi);
+err_vsi_open:
+ ice_vsi_release(ctrl_vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+ return err;
+}
+
+/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
*/
@@ -3123,7 +3248,7 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
if (!opt_fw_filename)
return NULL;
- snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
@@ -3295,12 +3420,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_unroll;
+ goto err_init_vsi_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
-
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3356,12 +3478,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, return here */
+ /* If no DDP driven features have to be setup, we are done with probe */
if (ice_is_safe_mode(pf))
- return 0;
+ goto probe_done;
/* initialize DDP driven features */
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
/* Note: DCB init failure is non-fatal to load */
if (ice_init_pf_dcb(pf, false)) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@@ -3373,6 +3499,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
+probe_done:
+ /* ready to go, so clear down state bit */
+ clear_bit(__ICE_DOWN, pf->state);
return 0;
err_alloc_sw_unroll:
@@ -3384,6 +3513,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
@@ -3421,6 +3551,9 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+ if (!ice_is_safe_mode(pf))
+ ice_remove_arfs(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
@@ -3705,25 +3838,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the MAC address we also have to change the MAC address
- * based filter rules that were created previously for the old MAC
- * address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac via
- * ice_vsi_cfg_mac_fltr function call for both add and/or remove
- * filters.
- */
- status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
- if (status) {
+ /* Clean up old MAC filter. Not an error if old filter doesn't exist */
+ status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
- if (status) {
- err = -EADDRNOTAVAIL;
- goto err_update_filters;
+ /* Add filter for new MAC. If filter exists, just return success */
+ status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
+ return 0;
}
+ /* error if the new filter addition failed */
+ if (status)
+ err = -EADDRNOTAVAIL;
+
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
@@ -3740,8 +3872,8 @@ err_update_filters:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
- mac, status);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
+ mac, ice_stat_str(status));
}
return 0;
}
@@ -3805,8 +3937,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
- status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
+ ice_stat_str(status));
return -EIO;
}
@@ -3938,6 +4070,16 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
ret = ice_cfg_vlan_pruning(vsi, false, false);
+ if ((features & NETIF_F_NTUPLE) &&
+ !(netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, true);
+ ice_init_arfs(vsi);
+ } else if (!(features & NETIF_F_NTUPLE) &&
+ (netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, false);
+ ice_clear_arfs(vsi);
+ }
+
return ret;
}
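Since NETIF_F_NTUPLE is now user-toggleable, flipping it switches side-band Flow Director and (under CONFIG_RFS_ACCEL) aRFS state on or off at runtime. Assuming the standard ethtool features interface (the interface name is a placeholder):

	ethtool -K eth0 ntuple on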
@@ -4084,6 +4226,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
}
/**
+ * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
+ * @vsi: the VSI to be updated
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ u16 count)
+{
+ struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_ring *ring;
+ u64 pkts, bytes;
+
+ ring = READ_ONCE(rings[i]);
+ ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ vsi_stats->tx_packets += pkts;
+ vsi_stats->tx_bytes += bytes;
+ vsi->tx_restart += ring->tx_stats.restart_q;
+ vsi->tx_busy += ring->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ }
+}
+
+/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
@@ -4110,15 +4279,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_for_each_txq(vsi, i) {
- ring = READ_ONCE(vsi->tx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
- vsi_stats->tx_packets += pkts;
- vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
- }
+ ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -4130,6 +4291,11 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
}
+ /* update XDP Tx rings counters */
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ vsi->num_xdp_txq);
+
rcu_read_unlock();
}
@@ -4162,7 +4328,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
- pf->stats.illegal_bytes;
+ pf->stats.illegal_bytes +
+ pf->stats.rx_len_errors +
+ pf->stats.rx_undersize +
+ pf->hw_csum_rx_error +
+ pf->stats.rx_jabber +
+ pf->stats.rx_fragments +
+ pf->stats.rx_oversize;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
@@ -4177,6 +4349,7 @@ void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
+ u16 fd_ctr_base;
u8 port;
port = hw->port_info->lport;
@@ -4265,6 +4438,12 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
+ fd_ctr_base = hw->fd_ctr_base;
+
+ ice_stat_update40(hw,
+ GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
+ pf->stat_prev_loaded, &prev_ps->fd_sb_match,
+ &cur_ps->fd_sb_match);
ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
@@ -4308,6 +4487,8 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
+ cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+
pf->stat_prev_loaded = true;
}
@@ -4493,6 +4674,62 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_open_ctrl - open control VSI for use
+ * @vsi: the VSI to open
+ *
+ * Initialization of the Control VSI
+ *
+ * Returns 0 on success, negative value on error
+ */
+int ice_vsi_open_ctrl(struct ice_vsi *vsi)
+{
+ char int_name[ICE_INT_NAME_STR_LEN];
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* allocate descriptors */
+ err = ice_vsi_setup_tx_rings(vsi);
+ if (err)
+ goto err_setup_tx;
+
+ err = ice_vsi_setup_rx_rings(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = ice_vsi_cfg(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
+ dev_driver_string(dev), dev_name(dev));
+ err = ice_vsi_req_irq_msix(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ ice_vsi_cfg_msix(vsi);
+
+ err = ice_vsi_start_all_rx_rings(vsi);
+ if (err)
+ goto err_up_complete;
+
+ clear_bit(__ICE_DOWN, vsi->state);
+ ice_vsi_ena_irq(vsi);
+
+ return 0;
+
+err_up_complete:
+ ice_down(vsi);
+err_setup_rx:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_tx:
+ ice_vsi_free_tx_rings(vsi);
+
+ return err;
+}
+
+/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -4604,8 +4841,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
- status, vsi->idx, ice_vsi_type_str(type));
+ dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
+ ice_stat_str(status), vsi->idx,
+ ice_vsi_type_str(type));
return -EIO;
}
@@ -4659,6 +4897,11 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
* ice_rebuild - rebuild after reset
* @pf: PF to rebuild
* @reset_type: type of reset
+ *
+ * Do not rebuild VF VSI in this flow because that is already handled via
+ * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
+ * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
+ * to reset/rebuild all the VF VSIs twice.
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -4674,7 +4917,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_init_all_ctrlq(hw);
if (ret) {
- dev_err(dev, "control queues init failed %d\n", ret);
+ dev_err(dev, "control queues init failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4690,7 +4934,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_clear_pf_cfg(hw);
if (ret) {
- dev_err(dev, "clear PF configuration failed %d\n", ret);
+ dev_err(dev, "clear PF configuration failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4704,7 +4949,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_get_caps(hw);
if (ret) {
- dev_err(dev, "ice_get_caps failed %d\n", ret);
+ dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ goto err_init_ctrlq;
+ }
+
+ ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (ret) {
+ dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4723,6 +4974,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_sched_init_port;
}
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ if (!rd32(hw, PFQF_FD_SIZE)) {
+ u16 unused, guar, b_effort;
+
+ guar = hw->func_caps.fd_fltr_guar;
+ b_effort = hw->func_caps.fd_fltr_best_effort;
+
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(hw, &unused, guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(hw, &unused, b_effort);
+ }
+ }
+
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -4733,12 +4999,22 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
+ /* If Flow Director is active */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
if (err) {
- dev_err(dev, "VF VSI rebuild failed: %d\n", err);
+ dev_err(dev, "control VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
+
+ /* replay HW Flow Director recipes */
+ if (hw->fdir_prof)
+ ice_fdir_replay_flows(hw);
+
+ /* replay Flow Director filters */
+ ice_fdir_replay_fltrs(pf);
+
+ ice_rebuild_arfs(pf);
}
ice_update_pf_netdev_link(pf);
@@ -4746,8 +5022,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
- ret);
+ dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
+ ice_stat_str(ret));
goto err_vsi_rebuild;
}
@@ -4795,7 +5071,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_pf *pf = vsi->back;
u8 count = 0;
- if (new_mtu == netdev->mtu) {
+ if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
return 0;
}
@@ -4810,11 +5086,11 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- if (new_mtu < netdev->min_mtu) {
+ if (new_mtu < (int)netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
- } else if (new_mtu > netdev->max_mtu) {
+ } else if (new_mtu > (int)netdev->max_mtu) {
netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
netdev->max_mtu);
return -EINVAL;
@@ -4835,7 +5111,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = new_mtu;
+ netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
@@ -4859,6 +5135,118 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * ice_aq_str - convert AQ err code to a string
+ * @aq_err: the AQ error code to convert
+ */
+const char *ice_aq_str(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_OK:
+ return "OK";
+ case ICE_AQ_RC_EPERM:
+ return "ICE_AQ_RC_EPERM";
+ case ICE_AQ_RC_ENOENT:
+ return "ICE_AQ_RC_ENOENT";
+ case ICE_AQ_RC_ENOMEM:
+ return "ICE_AQ_RC_ENOMEM";
+ case ICE_AQ_RC_EBUSY:
+ return "ICE_AQ_RC_EBUSY";
+ case ICE_AQ_RC_EEXIST:
+ return "ICE_AQ_RC_EEXIST";
+ case ICE_AQ_RC_EINVAL:
+ return "ICE_AQ_RC_EINVAL";
+ case ICE_AQ_RC_ENOSPC:
+ return "ICE_AQ_RC_ENOSPC";
+ case ICE_AQ_RC_ENOSYS:
+ return "ICE_AQ_RC_ENOSYS";
+ case ICE_AQ_RC_EMODE:
+ return "ICE_AQ_RC_EMODE";
+ case ICE_AQ_RC_ENOSEC:
+ return "ICE_AQ_RC_ENOSEC";
+ case ICE_AQ_RC_EBADSIG:
+ return "ICE_AQ_RC_EBADSIG";
+ case ICE_AQ_RC_ESVN:
+ return "ICE_AQ_RC_ESVN";
+ case ICE_AQ_RC_EBADMAN:
+ return "ICE_AQ_RC_EBADMAN";
+ case ICE_AQ_RC_EBADBUF:
+ return "ICE_AQ_RC_EBADBUF";
+ }
+
+ return "ICE_AQ_RC_UNKNOWN";
+}
+
+/**
+ * ice_stat_str - convert status err code to a string
+ * @stat_err: the status error code to convert
+ */
+const char *ice_stat_str(enum ice_status stat_err)
+{
+ switch (stat_err) {
+ case ICE_SUCCESS:
+ return "OK";
+ case ICE_ERR_PARAM:
+ return "ICE_ERR_PARAM";
+ case ICE_ERR_NOT_IMPL:
+ return "ICE_ERR_NOT_IMPL";
+ case ICE_ERR_NOT_READY:
+ return "ICE_ERR_NOT_READY";
+ case ICE_ERR_NOT_SUPPORTED:
+ return "ICE_ERR_NOT_SUPPORTED";
+ case ICE_ERR_BAD_PTR:
+ return "ICE_ERR_BAD_PTR";
+ case ICE_ERR_INVAL_SIZE:
+ return "ICE_ERR_INVAL_SIZE";
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ return "ICE_ERR_DEVICE_NOT_SUPPORTED";
+ case ICE_ERR_RESET_FAILED:
+ return "ICE_ERR_RESET_FAILED";
+ case ICE_ERR_FW_API_VER:
+ return "ICE_ERR_FW_API_VER";
+ case ICE_ERR_NO_MEMORY:
+ return "ICE_ERR_NO_MEMORY";
+ case ICE_ERR_CFG:
+ return "ICE_ERR_CFG";
+ case ICE_ERR_OUT_OF_RANGE:
+ return "ICE_ERR_OUT_OF_RANGE";
+ case ICE_ERR_ALREADY_EXISTS:
+ return "ICE_ERR_ALREADY_EXISTS";
+ case ICE_ERR_NVM_CHECKSUM:
+ return "ICE_ERR_NVM_CHECKSUM";
+ case ICE_ERR_BUF_TOO_SHORT:
+ return "ICE_ERR_BUF_TOO_SHORT";
+ case ICE_ERR_NVM_BLANK_MODE:
+ return "ICE_ERR_NVM_BLANK_MODE";
+ case ICE_ERR_IN_USE:
+ return "ICE_ERR_IN_USE";
+ case ICE_ERR_MAX_LIMIT:
+ return "ICE_ERR_MAX_LIMIT";
+ case ICE_ERR_RESET_ONGOING:
+ return "ICE_ERR_RESET_ONGOING";
+ case ICE_ERR_HW_TABLE:
+ return "ICE_ERR_HW_TABLE";
+ case ICE_ERR_DOES_NOT_EXIST:
+ return "ICE_ERR_DOES_NOT_EXIST";
+ case ICE_ERR_FW_DDP_MISMATCH:
+ return "ICE_ERR_FW_DDP_MISMATCH";
+ case ICE_ERR_AQ_ERROR:
+ return "ICE_ERR_AQ_ERROR";
+ case ICE_ERR_AQ_TIMEOUT:
+ return "ICE_ERR_AQ_TIMEOUT";
+ case ICE_ERR_AQ_FULL:
+ return "ICE_ERR_AQ_FULL";
+ case ICE_ERR_AQ_NO_WORK:
+ return "ICE_ERR_AQ_NO_WORK";
+ case ICE_ERR_AQ_EMPTY:
+ return "ICE_ERR_AQ_EMPTY";
+ case ICE_ERR_AQ_FW_CRITICAL:
+ return "ICE_ERR_AQ_FW_CRITICAL";
+ }
+
+ return "ICE_ERR_UNKNOWN";
+}
+
+/**
* ice_set_rss - Set RSS keys and lut
* @vsi: Pointer to VSI structure
* @seed: RSS hash seed
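ice_stat_str() and ice_aq_str() above let log lines carry symbolic error names instead of raw integers. Note that the RSS hunks below also switch from rq_last_status to sq_last_status: the return code of a command the driver submitted is reported on the admin send queue, not the receive queue. The typical call pattern, as used throughout this patch:

	status = ice_aq_set_rss_key(hw, vsi->idx, buf);
	if (status)
		dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
			ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));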
@@ -4882,8 +5270,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4892,8 +5281,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4924,8 +5314,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4934,8 +5325,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -5002,8 +5394,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
- bmode, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
+ bmode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -5072,8 +5465,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
- mode, status, hw->adminq.sq_last_status);
+ netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
+ mode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return -EIO;
@@ -5100,6 +5494,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
pf->tx_timeout_count++;
+ /* Check if PFC is enabled for the TC to which the queue belongs.
+ * If so, the Tx timeout is not caused by a hung queue, so there is
+ * no need to reset and rebuild
+ */
+ if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
+ dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
+ txqueue);
+ return;
+ }
+
/* now that we have an index, find the tx_ring struct */
for (i = 0; i < vsi->num_txq; i++)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
@@ -5158,6 +5562,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ tnl_type = TNL_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ tnl_type = TNL_GENEVE;
+ break;
+ default:
+ netdev_err(netdev, "Unknown tunnel type\n");
+ return;
+ }
+
+ status = ice_create_tunnel(&pf->hw, tnl_type, port);
+ if (status == ICE_ERR_OUT_OF_RANGE)
+ netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
+ port);
+ else if (status)
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+}
+
+/**
+ * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+ bool retval;
+
+ retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
+ if (!retval) {
+ netdev_info(netdev, "port %d not found in UDP tunnels list\n",
+ port);
+ return;
+ }
+
+ status = ice_destroy_tunnel(&pf->hw, port, false);
+ if (status)
+ netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
+ port);
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5173,14 +5641,20 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
int err;
- if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
return -EIO;
}
+ if (test_bit(__ICE_DOWN, pf->state)) {
+ netdev_err(netdev, "device is not ready yet\n");
+ return -EBUSY;
+ }
+
netif_carrier_off(netdev);
pi = vsi->port_info;
@@ -5213,6 +5687,10 @@ int ice_open(struct net_device *netdev)
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
+
+ /* Update existing tunnels information */
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
@@ -5263,21 +5741,21 @@ ice_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
- if (len & ~(ICE_TXD_MACLEN_MAX))
+ if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len & ~(ICE_TXD_L4LEN_MAX))
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
@@ -5322,8 +5800,13 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = ice_rx_flow_steer,
+#endif
.ndo_tx_timeout = ice_tx_timeout,
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_udp_tunnel_add = ice_udp_tunnel_add,
+ .ndo_udp_tunnel_del = ice_udp_tunnel_del,
};